#include <linux/hardirq.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
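	/*
	 * Any hypercall will do here; the version query is used
	 * because it is cheap and has no side effects.
	 */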
	(void)HYPERVISOR_xen_version(0, NULL);
}

static void __init __xen_init_IRQ(void)
{
	int i;

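	/*
	 * Xen delivers interrupts via event channels rather than the
	 * APIC; the upcall path effectively hands do_IRQ an irq number
	 * where it expects a hardware vector, so an identity
	 * vector->irq map keeps that translation a no-op.
	 */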
	/* Create identity vector->irq map */
	for (i = 0; i < NR_VECTORS; i++) {
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(vector_irq, cpu)[i] = i;
	}

	xen_init_IRQ();
}

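/*
 * Read the virtual equivalent of EFLAGS.IF: Xen tracks interrupt
 * enablement in the per-vcpu evtchn_upcall_mask byte, with the
 * opposite sense (non-zero means events are masked).
 */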
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = percpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}

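/*
 * Write the virtual equivalent of EFLAGS.IF. When unmasking, an event
 * may have been raised while delivery was off, so check for it and
 * force an upcall rather than leave it stranded.
 */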
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one-instruction preempt window here. We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}

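/* Mask event delivery: the paravirtual counterpart of cli. */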
static void xen_irq_disable(void)
{
	/* There's a one-instruction preempt window here. We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}

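/* Unmask event delivery: the paravirtual counterpart of sti. */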
static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}

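/*
 * SCHEDOP_block sleeps the vcpu until an event is pending, unmasking
 * event delivery on the way, which mirrors the "sti; hlt" sequence
 * that safe_halt replaces.
 */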
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

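/*
 * With interrupts disabled nothing can wake the vcpu, so take it
 * offline entirely via VCPUOP_down; otherwise fall back to the
 * blocking halt above.
 */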
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

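/*
 * The paravirt hooks that replace the native flag and halt operations
 * with their event-channel based equivalents.
 */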
static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = __xen_init_IRQ,
	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

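/*
 * Install the hooks. From here on, local_irq_save()/restore() and
 * friends are routed through the functions above rather than touching
 * EFLAGS directly.
 */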
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
}