/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is written for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (ie, vcpu info in the pda) of
	the operations here; the indirect forms are better handled in
	C, since they're generally too large to inline anyway.
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
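
/*
	These symbols feed the patching machinery (see xen_patch()):
	x_end marks the end of the code that may be copied out for
	inlining, and x_reloc gives the offset within that code of a
	call displacement which has to be fixed up when the code is
	copied elsewhere; 0 means nothing needs relocating.
 */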
23
24/*
25 Enable events. This clears the event mask and tests the pending
26 event status with one and operation. If there are pending
27 events, then enter the hypervisor to get them handled.
28 */
ENTRY(xen_irq_enable_direct)
	/* Clear mask and test pending */
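	/* vcpu_info keeps evtchn_upcall_pending in the low byte and
	   evtchn_upcall_mask in the byte directly above it, so one
	   16-bit "and" clears the mask and sets ZF from the pending
	   byte at the same time. */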
	andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */
	jz 1f
2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
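	/* The relocation offset 2b+1 points one byte past the call
	   opcode at label 2, ie at the instruction's 32-bit
	   displacement to check_events, which is what must be fixed
	   up when this code is copied for inlining. */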


/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
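	/* No pending check is needed on the disable path: masking
	   delivery leaves pending events where they are, and they get
	   looked at again when events are re-enabled. */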
	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be in X86_EFLAGS_IF, and other bits
	may be set in the return value.  We take advantage of this by
	making sure that X86_EFLAGS_IF has the right value (and other
	bits in that byte are 0), but other bits in the return value are
	undefined.  We need to toggle the state of the bit, because Xen
	and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
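	/* mask == 0 means events are enabled.  setz/addb turn that
	   into %ah = 2, which is X86_EFLAGS_IF (bit 9 of the flags,
	   ie bit 1 of the second byte) in the returned %eax. */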
	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
	In principle the caller should be passing us a value returned
	from xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, it checks for unmasked
	pending events and enters the hypervisor to get them delivered
	if so.
 */
ENTRY(xen_restore_fl_direct)
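	/* The flags argument arrives in %eax; X86_EFLAGS_IF is bit 9,
	   so shifting it right by 8 lets us test it against %ah.  ZF
	   ends up set when interrupts were disabled, which is exactly
	   the value the Xen mask byte wants. */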
	testb $X86_EFLAGS_IF>>8, %ah
	setz %al
	movb %al, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Check for pending but unmasked: pending (the low byte) must
	   be 1 and the mask (the high byte) 0, so the whole word must
	   be exactly 0x0001; only then do we enter the hypervisor. */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)



/*
	Force an event check by making a hypercall, but preserve regs
	before making the call.
 */
check_events:
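	/* force_evtchn_callback is C code, so the C-ABI call-clobbered
	   registers (%eax, %ecx, %edx) must be saved by hand; callers
	   of this helper assume it clobbers nothing. */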
	push %eax
	push %ecx
	push %edx
	call force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
	ret