/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining. The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include "xen-asm.h"

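/*
 * ENDPATCH and RELOC below come from xen-asm.h: ENDPATCH(x) emits an
 * x_end symbol marking the end of the patchable sequence, and
 * RELOC(x, r) emits an x_reloc symbol telling the paravirt patcher
 * what needs relocating when the sequence is copied inline. '2b+1'
 * is the 32-bit displacement of the 'call check_events' at local
 * label 2 (one byte past the call opcode); a RELOC of 0 means nothing
 * in the sequence needs fixing up.
 */
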
/*
 * Enable events. This clears the event mask and tests the pending
 * event status with one 'and' operation. If there are pending events,
 * then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

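	/*
	 * The testb below is the single 'and' operation referred to in
	 * the comment above this function: it ANDs the pending byte
	 * with 0xff without writing it back, setting ZF iff no event
	 * is pending.
	 */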
	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
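	/*
	 * setz leaves %ah == 1 iff the mask byte was zero, i.e. events
	 * were enabled; doubling it turns that into %ah == 2, which is
	 * bit 9 of %eax: exactly X86_EFLAGS_IF (0x200).
	 */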
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
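	/*
	 * The flags argument arrives in %rdi on 64-bit (standard C
	 * ABI) but in %eax on 32-bit (callee-save thunk convention),
	 * so bit 9 (X86_EFLAGS_IF) is tested via %di or, shifted down
	 * a byte, via %ah.
	 */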
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

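	/*
	 * The mask byte sits directly above the pending byte in the
	 * vcpu_info structure, so a single 16-bit compare covers both:
	 * the word equals 0x0001 exactly when an event is pending
	 * (low byte 1) and the mask is clear (high byte 0).
	 */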
	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
	FRAME_BEGIN
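	/*
	 * xen_force_evtchn_callback is plain C, so every register the
	 * C ABI lets it clobber (%eax/%ecx/%edx on 32-bit; %rax, %rcx,
	 * %rdx, %rsi, %rdi and %r8-%r11 on 64-bit) must be saved by
	 * hand: the patched sites that call this stub assume no
	 * registers are modified at all.
	 */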
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
ENDPROC(check_events)