/*
   Asm versions of Xen pv-ops, suitable for either direct use or
   inlining.  The inline versions are the same as the direct-use
   versions, with the pre- and post-amble chopped off.

   This code is encoded for size rather than absolute efficiency,
   with a view to being able to inline as much as possible.

   We only bother with direct forms (i.e., vcpu in pda) of the
   operations here; the indirect forms are better handled in C,
   since they're generally too large to inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
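
/*
   Patching support: ENDPATCH(x) marks the end of the code that the
   pv-op patcher may copy inline over a call site, and RELOC(x, v)
   records the location of a displacement inside that code which
   must be fixed up after copying (0 means there is nothing to
   relocate).
 */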

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 0
#include <asm/percpu.h>

/*
   Enable events.  This clears the event mask, then tests the
   pending-event status with a single and-based test.  If there are
   pending events, enter the hypervisor to get them handled.
 */
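/*
   Roughly, in C -- a sketch only, with field names taken from
   struct vcpu_info in xen/interface/xen.h:

	vcpu->evtchn_upcall_mask = 0;
	barrier();
	if (vcpu->evtchn_upcall_pending)
		check_events();
 */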
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

	/* Preemption here doesn't matter, because it will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
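	/* "2b+1" is the address of the call instruction's 32-bit
	   displacement (one byte past the opcode), so the patcher can
	   re-target the call to check_events when this sequence is
	   copied inline. */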

/*
   Disabling events is simply a matter of making the event mask
   non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
   (xen_)save_fl is used to get the current interrupt enable status.
   Callers expect the status to be in X86_EFLAGS_IF, and other bits
   may be set in the return value.  We take advantage of this by
   making sure that X86_EFLAGS_IF has the right value (and other
   bits in that byte are 0), but other bits in the return value are
   undefined.  We need to toggle the state of the bit, because Xen
   and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
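	/* mask == 0 means events are enabled.  setz gives %ah = 1 in
	   that case, and doubling it yields %ah = 2; since %ah is
	   bits 8..15 of the returned value, that is exactly
	   X86_EFLAGS_IF (0x200). */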
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
   In principle the caller should be passing us a value returned
   from xen_save_fl_direct, but for robustness' sake we test only
   the X86_EFLAGS_IF flag rather than the whole byte.  After
   setting the interrupt mask state, it checks for unmasked
   pending events and enters the hypervisor to get them delivered
   if so.
 */
ENTRY(xen_restore_fl_direct)
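	/* ZF is set iff the caller's X86_EFLAGS_IF was clear, so setz
	   writes mask = 1 (events disabled) in that case and mask = 0
	   (enabled) otherwise -- the mask/enable sense inversion
	   noted above. */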
	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	/* Preemption here doesn't matter, because it will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* check for unmasked and pending */
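	/* evtchn_upcall_pending and evtchn_upcall_mask are adjacent
	   bytes, so this single 16-bit compare checks "pending == 1
	   and mask == 0" at once; if so, call check_events. */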
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
   Force an event check by making a hypercall, but preserve regs
   before making the call.
 */
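/*
   force_evtchn_callback is a C function, so save everything the
   x86-64 SysV ABI allows it to clobber; callers of these direct
   ops expect no registers to change.
 */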
check_events:
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret
#endif

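/*
   Xen pushes %rcx and %r11 below the hardware exception frame.
   Pop them back into their registers and drop the two words
   (ret $16) so the frame looks like a native CPU-generated one.
 */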
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp),%rcx
	mov 8+8(%rsp),%r11
	ret $16

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
   Xen64 iret frame:

	ss
	rsp
	rflags
	cs
	rip		<-- standard iret frame

	flags

	rcx		}
	r11		} <-- pushed by hypercall page
rsp ->	rax		}
 */
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
   sysexit is not used for 64-bit processes, so it's only ever
   used to return to 32-bit compat userspace.
 */
ENTRY(xen_sysexit)
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

ENTRY(xen_sysret64)
	/* We're already on the usermode stack at this point, but
	   still with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp

	pushq $__USER_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

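	/* VGCF_in_syscall in the frame's flags word tells Xen this is
	   a syscall-style return, so it can take the fast path that,
	   like native sysret, does not preserve %rcx/%r11. */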
	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
	/* We're already on the usermode stack at this point, but
	   still with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq %gs:pda_kernelstack, %rsp

	pushq $__USER32_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
   Xen handles syscall callbacks much like ordinary exceptions,
   which means we have:
	- kernel gs
	- kernel rsp
	- an iret-like stack frame on the stack (including rcx and r11):
		ss
		rsp
		rflags
		cs
		rip
		r11
	rsp ->	rcx

   In all the entrypoints, we undo all that to make it look like a
   CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */

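/*
   Restore the state a native syscall/sysenter entry would see:
   %rcx and %r11 as pushed by Xen (user rip and rflags, per the
   native syscall convention), and %rsp switched back to the user
   stack slot of the frame above.
 */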
.macro undo_xen_syscall
	mov 0*8(%rsp),%rcx	/* rcx, pushed by Xen (see frame above) */
	mov 1*8(%rsp),%r11	/* r11 */
	mov 5*8(%rsp),%rsp	/* user rsp; skips rip, cs, rflags */
	.endm
/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

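/*
   Without compat support, fail 32-bit syscall/sysenter attempts
   with -ENOSYS, returning straight to userspace through the iret
   hypercall (both entry labels share this stub).
 */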
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif /* CONFIG_IA32_EMULATION */