#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
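/*
 * A note on xen_force_evtchn_callback() above: the version hypercall
 * (cmd 0, no argument) is presumably used because it is about the
 * cheapest hypercall with no side effects; the point is only to enter
 * and leave the hypervisor, since pending events are checked and
 * delivered on the return path.
 */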

asmlinkage __visible unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/*
	 * convert to IF type flag
	 *   -0 -> 0x00000000
	 *   -1 -> 0xffffffff
	 */
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
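/*
 * A note on the thunk above: PV_CALLEE_SAVE_REGS_THUNK() is assumed to
 * follow the usual pv-ops callee-save convention, generating a wrapper
 * that preserves the normally caller-clobbered registers, so the patched
 * paravirt call sites only have to treat the return register as
 * clobbered.
 */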

__visible void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

	if (flags == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

asmlinkage __visible void xen_irq_disable(void)
{
	/*
	 * There's a one-instruction preempt window here.  We need to
	 * make sure we don't switch CPUs between getting the vcpu
	 * pointer and updating the mask.
	 */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
Jeremy Fitzhardinge0d1edf42008-07-28 11:53:57 -070076
Andi Kleen2605fc22014-05-02 00:44:37 +020077asmlinkage __visible void xen_irq_enable(void)
Jeremy Fitzhardinge0d1edf42008-07-28 11:53:57 -070078{
79 struct vcpu_info *vcpu;
80
David Vrabelfb58e302013-08-15 13:21:04 +010081 /*
82 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
83 * cleared, so disable preemption to ensure we check for
84 * events on the VCPU we are still running on.
85 */
86 preempt_disable();
Jeremy Fitzhardinge0d1edf42008-07-28 11:53:57 -070087
Alex Shi2113f462012-01-13 23:53:35 +080088 vcpu = this_cpu_read(xen_vcpu);
Jeremy Fitzhardinge0d1edf42008-07-28 11:53:57 -070089 vcpu->evtchn_upcall_mask = 0;
90
91 /* Doesn't matter if we get preempted here, because any
92 pending event will get dealt with anyway. */
93
94 barrier(); /* unmask then check (avoid races) */
95 if (unlikely(vcpu->evtchn_upcall_pending))
96 xen_force_evtchn_callback();
David Vrabelfb58e302013-08-15 13:21:04 +010097
98 preempt_enable();
Jeremy Fitzhardinge0d1edf42008-07-28 11:53:57 -070099}
Jeremy Fitzhardingeecb93d12009-01-28 14:35:05 -0800100PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
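/*
 * A note on the ordering in xen_irq_enable() above: barrier() keeps the
 * compiler from reordering the clear of evtchn_upcall_mask past the read
 * of evtchn_upcall_pending, so an event raised while events were masked
 * is caught by the pending check; an event raised after the clear is
 * expected to be delivered by Xen through the normal upcall path.
 */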

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down,
				   xen_vcpu_nr(smp_processor_id()), NULL);
	else
		xen_safe_halt();
}
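/*
 * A note on xen_halt() above: halting with events masked cannot use
 * SCHEDOP_block, since blocking implicitly re-enables event delivery;
 * VCPUOP_down instead takes the vCPU offline until some other vCPU
 * brings it back with VCPUOP_up.
 */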

static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

void __init xen_init_irq_ops(void)
{
	/* For PVH we use default pv_irq_ops settings. */
	if (!xen_feature(XENFEAT_hvm_callback_vector))
		pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}
Jeremy Fitzhardinge0d1edf42008-07-28 11:53:57 -0700137}