/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   unsigned short vector,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.vector = vector;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

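/*
 * Allocate the Xen-specific bookkeeping (struct irq_info) for a freshly
 * allocated irq, attach it as the irq's handler data and add it to
 * xen_irq_list_head.  The info starts out as IRQT_UNBOUND until one of
 * the xen_irq_info_*_init() constructors fills it in.
 */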
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

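/*
 * Allocate an irq that has no fixed relationship to a GSI, e.g. for an
 * event channel, VIRQ, IPI or MSI binding.
 */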
static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

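/* Undo xen_irq_init(): unlink and free the irq_info, then release the irq. */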
static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

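/*
 * Ask Xen whether this PIRQ requires an explicit EOI and cache the answer
 * in the PIRQ_NEEDS_EOI flag, which eoi_pirq() consults later.
 */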
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

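/*
 * Acknowledge a PIRQ: clear the pending event channel bit and, if Xen
 * reported that this PIRQ needs it, issue a PHYSDEVOP_eoi hypercall.
 */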
static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

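/*
 * Bind the PIRQ to an event channel (EVTCHNOP_bind_pirq), record the
 * resulting port in the irq_info and unmask it.  If the irq already has
 * a valid event channel this just re-unmasks it.
 */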
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

static int find_irq_by_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	mutex_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name,
			     domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	mutex_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			printk(KERN_INFO "domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

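/*
 * Scan all event channel ports (EVTCHNOP_status) looking for one that is
 * already bound to the given VIRQ on the given VCPU; used when
 * EVTCHNOP_bind_virq reports that the binding already exists.
 */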
static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

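/*
 * Close the event channel behind an irq (if any), drop the per-cpu
 * VIRQ/IPI mapping and free the irq itself.
 */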
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

	mutex_unlock(&irq_mapping_update_lock);
}

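/*
 * Convenience wrapper used by most drivers: map the event channel to an
 * irq (allocating one if necessary) and install the handler on it with
 * request_irq().  Returns the irq number or a negative errno; the
 * counterpart of this call is unbind_from_irqhandler().
 */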
976int bind_evtchn_to_irqhandler(unsigned int evtchn,
Jeff Garzik7c239972007-10-19 03:12:20 -0400977 irq_handler_t handler,
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -0700978 unsigned long irqflags,
979 const char *devname, void *dev_id)
980{
Nicolas Kaiser361ae8c2011-03-30 21:14:26 +0200981 int irq, retval;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -0700982
983 irq = bind_evtchn_to_irq(evtchn);
Ian Campbell7bee9762011-03-10 16:08:15 +0000984 if (irq < 0)
985 return irq;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -0700986 retval = request_irq(irq, handler, irqflags, devname, dev_id);
987 if (retval != 0) {
988 unbind_from_irq(irq);
989 return retval;
990 }
991
992 return irq;
993}
994EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
995
Ian Campbell2e820f52009-02-09 12:05:50 -0800996int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
997 unsigned int remote_port,
998 irq_handler_t handler,
999 unsigned long irqflags,
1000 const char *devname,
1001 void *dev_id)
1002{
1003 int irq, retval;
1004
1005 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
1006 if (irq < 0)
1007 return irq;
1008
1009 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1010 if (retval != 0) {
1011 unbind_from_irq(irq);
1012 return retval;
1013 }
1014
1015 return irq;
1016}
1017EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
1018
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001019int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
Jeff Garzik7c239972007-10-19 03:12:20 -04001020 irq_handler_t handler,
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001021 unsigned long irqflags, const char *devname, void *dev_id)
1022{
Nicolas Kaiser361ae8c2011-03-30 21:14:26 +02001023 int irq, retval;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001024
1025 irq = bind_virq_to_irq(virq, cpu);
Ian Campbell7bee9762011-03-10 16:08:15 +00001026 if (irq < 0)
1027 return irq;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001028 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1029 if (retval != 0) {
1030 unbind_from_irq(irq);
1031 return retval;
1032 }
1033
1034 return irq;
1035}
1036EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
1037
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001038int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1039 unsigned int cpu,
1040 irq_handler_t handler,
1041 unsigned long irqflags,
1042 const char *devname,
1043 void *dev_id)
1044{
1045 int irq, retval;
1046
1047 irq = bind_ipi_to_irq(ipi, cpu);
1048 if (irq < 0)
1049 return irq;
1050
Ian Campbell9bab0b72011-10-03 15:37:00 +01001051 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001052 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1053 if (retval != 0) {
1054 unbind_from_irq(irq);
1055 return retval;
1056 }
1057
1058 return irq;
1059}
1060
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
					     ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
					     ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
					     ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

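/*
 * Per-cpu state for __xen_evtchn_do_upcall(): xed_nesting_count detects
 * re-entry, while current_word_idx/current_bit_idx remember where the
 * previous scan stopped so the next one resumes just after it, keeping
 * event delivery fair across ports.
 */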
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))

/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			unsigned long pending_bits;
			unsigned long words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = __ffs(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}

			do {
				unsigned long bits;
				int port, irq;
				struct irq_desc *desc;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = __ffs(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_LONG) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_LONG);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_l1i twice; all others once. */
			if ((word_idx != start_word_idx) || (i != 0))
				pending_words &= ~(1UL << word_idx);

			word_idx = (word_idx + 1) % BITS_PER_LONG;
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

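/*
 * Entry points into the scan loop above: xen_evtchn_do_upcall() is
 * called with a register frame from the hypervisor event callback,
 * while xen_hvm_evtchn_do_upcall() below is the variant used when no
 * register frame is supplied (for instance from the platform PCI
 * driver's interrupt handler on HVM guests).
 */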
Sheng Yang38e20b02010-05-14 12:40:51 +01001281void xen_evtchn_do_upcall(struct pt_regs *regs)
1282{
1283 struct pt_regs *old_regs = set_irq_regs(regs);
1284
1285 exit_idle();
1286 irq_enter();
1287
1288 __xen_evtchn_do_upcall();
1289
1290 irq_exit();
1291 set_irq_regs(old_regs);
1292}
1293
1294void xen_hvm_evtchn_do_upcall(void)
1295{
1296 __xen_evtchn_do_upcall();
1297}
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001298EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
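/*
 * xen_hvm_evtchn_do_upcall() is the entry point used when an HVM guest
 * receives its event notifications over an ordinary interrupt (e.g. the
 * platform PCI device) instead of the vector callback.  A minimal sketch
 * of such a caller follows; the handler name is hypothetical.
 */
#if 0	/* illustrative sketch */
static irqreturn_t example_hvm_evtchn_intr(int irq, void *dev_id)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}
#endif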
Sheng Yang38e20b02010-05-14 12:40:51 +01001299
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001300/* Rebind a new event channel to an existing irq. */
1301void rebind_evtchn_irq(int evtchn, int irq)
1302{
Jeremy Fitzhardinged77bbd42009-02-06 14:09:45 -08001303 struct irq_info *info = info_for_irq(irq);
1304
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001305 /* Make sure the irq is masked, since the new event channel
1306 will also be masked. */
1307 disable_irq(irq);
1308
Konrad Rzeszutek Wilk77365942011-09-14 05:10:00 -04001309 mutex_lock(&irq_mapping_update_lock);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001310
1311 /* After resume the irq<->evtchn mappings are all cleared out */
1312 BUG_ON(evtchn_to_irq[evtchn] != -1);
1313 /* Expect the irq to have been bound before,
Jeremy Fitzhardinged77bbd42009-02-06 14:09:45 -08001314 so it should already have a proper type */
1315 BUG_ON(info->type == IRQT_UNBOUND);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001316
Ian Campbell9158c352011-03-10 16:08:09 +00001317 xen_irq_info_evtchn_init(irq, evtchn);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001318
Konrad Rzeszutek Wilk77365942011-09-14 05:10:00 -04001319 mutex_unlock(&irq_mapping_update_lock);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001320
1321 /* new event channels are always bound to cpu 0 */
Rusty Russell0de26522008-12-13 21:20:26 +10301322 irq_set_affinity(irq, cpumask_of(0));
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001323
1324 /* Unmask the event channel. */
1325 enable_irq(irq);
1326}
1327
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001328/* Rebind an evtchn so that it gets delivered to a specific cpu */
Yinghai Lud5dedd42009-04-27 17:59:21 -07001329static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001330{
1331 struct evtchn_bind_vcpu bind_vcpu;
1332 int evtchn = evtchn_from_irq(irq);
1333
Ian Campbellbe494722011-03-10 16:08:02 +00001334 if (!VALID_EVTCHN(evtchn))
1335 return -1;
1336
1337 /*
1338 * Events delivered via platform PCI interrupts are always
1339 * routed to vcpu 0 and hence cannot be rebound.
1340 */
1341 if (xen_hvm_domain() && !xen_have_vector_callback)
Yinghai Lud5dedd42009-04-27 17:59:21 -07001342 return -1;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001343
1344 /* Send future instances of this interrupt to other vcpu. */
1345 bind_vcpu.port = evtchn;
1346 bind_vcpu.vcpu = tcpu;
1347
1348 /*
1349 * If this fails, it usually just indicates that we're dealing with a
1350 * virq or IPI channel, which don't actually need to be rebound. Ignore
1351 * it, but don't do the xenlinux-level rebind in that case.
1352 */
1353 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1354 bind_evtchn_to_cpu(evtchn, tcpu);
Yinghai Lud5dedd42009-04-27 17:59:21 -07001355
1356 return 0;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001357}
1358
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001359static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1360 bool force)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001361{
Rusty Russell0de26522008-12-13 21:20:26 +10301362 unsigned tcpu = cpumask_first(dest);
Yinghai Lud5dedd42009-04-27 17:59:21 -07001363
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001364 return rebind_irq_to_cpu(data->irq, tcpu);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001365}
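/*
 * set_affinity_irq() is only ever invoked through the generic irq layer;
 * callers move an event channel to another vcpu with irq_set_affinity(),
 * as rebind_evtchn_irq() does above when pinning a fresh channel to cpu 0.
 * A hypothetical sketch of spreading a driver's event-channel irqs across
 * the online cpus (helper name and irq array are assumptions):
 */
#if 0	/* illustrative sketch */
static void example_spread_irqs(int *irqs, unsigned int count)
{
	unsigned int i = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if (i == count)
			break;
		/* Future events on this channel are delivered to 'cpu'. */
		irq_set_affinity(irqs[i++], cpumask_of(cpu));
	}
}
#endif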
1366
Isaku Yamahata642e0c82008-04-02 10:53:57 -07001367int resend_irq_on_evtchn(unsigned int irq)
1368{
1369 int masked, evtchn = evtchn_from_irq(irq);
1370 struct shared_info *s = HYPERVISOR_shared_info;
1371
1372 if (!VALID_EVTCHN(evtchn))
1373 return 1;
1374
1375 masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
1376 sync_set_bit(evtchn, s->evtchn_pending);
1377 if (!masked)
1378 unmask_evtchn(evtchn);
1379
1380 return 1;
1381}
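/*
 * Note on the idiom above (retrigger_dynirq() below uses the same one):
 * setting the pending bit from inside the guest does not by itself raise
 * an upcall, so the event is re-delivered by going through unmask_evtchn(),
 * which checks for pending events.  The test-and-set of the mask bit
 * remembers whether the channel was already masked; if it was, it is left
 * masked and the event will be seen when its owner unmasks it.
 */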
1382
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001383static void enable_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001384{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001385 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001386
1387 if (VALID_EVTCHN(evtchn))
1388 unmask_evtchn(evtchn);
1389}
1390
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001391static void disable_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001392{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001393 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001394
1395 if (VALID_EVTCHN(evtchn))
1396 mask_evtchn(evtchn);
1397}
1398
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001399static void ack_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001400{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001401 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001402
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001403 irq_move_irq(data);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001404
1405 if (VALID_EVTCHN(evtchn))
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001406 clear_evtchn(evtchn);
1407}
1408
1409static void mask_ack_dynirq(struct irq_data *data)
1410{
1411 disable_dynirq(data);
1412 ack_dynirq(data);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001413}
1414
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001415static int retrigger_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001416{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001417 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingeee8fa1c2008-03-17 16:37:19 -07001418 struct shared_info *sh = HYPERVISOR_shared_info;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001419 int ret = 0;
1420
1421 if (VALID_EVTCHN(evtchn)) {
Jeremy Fitzhardingeee8fa1c2008-03-17 16:37:19 -07001422 int masked;
1423
1424 masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
1425 sync_set_bit(evtchn, sh->evtchn_pending);
1426 if (!masked)
1427 unmask_evtchn(evtchn);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001428 ret = 1;
1429 }
1430
1431 return ret;
1432}
1433
Ian Campbell0a852262011-03-10 16:08:06 +00001434static void restore_pirqs(void)
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001435{
1436 int pirq, rc, irq, gsi;
1437 struct physdev_map_pirq map_irq;
Ian Campbell69c358c2011-03-10 16:08:13 +00001438 struct irq_info *info;
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001439
Ian Campbell69c358c2011-03-10 16:08:13 +00001440 list_for_each_entry(info, &xen_irq_list_head, list) {
1441 if (info->type != IRQT_PIRQ)
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001442 continue;
1443
Ian Campbell69c358c2011-03-10 16:08:13 +00001444 pirq = info->u.pirq.pirq;
1445 gsi = info->u.pirq.gsi;
1446 irq = info->irq;
1447
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001448 /* Save/restore of passthrough (PT) devices doesn't work, so at this
1449 * point the only devices present are GSI-based emulated devices */
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001450 if (!gsi)
1451 continue;
1452
1453 map_irq.domid = DOMID_SELF;
1454 map_irq.type = MAP_PIRQ_TYPE_GSI;
1455 map_irq.index = gsi;
1456 map_irq.pirq = pirq;
1457
1458 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1459 if (rc) {
1460 printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1461 gsi, irq, pirq, rc);
Ian Campbell9158c352011-03-10 16:08:09 +00001462 xen_free_irq(irq);
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001463 continue;
1464 }
1465
1466 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1467
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001468 __startup_pirq(irq);
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001469 }
1470}
1471
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001472static void restore_cpu_virqs(unsigned int cpu)
1473{
1474 struct evtchn_bind_virq bind_virq;
1475 int virq, irq, evtchn;
1476
1477 for (virq = 0; virq < NR_VIRQS; virq++) {
1478 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1479 continue;
1480
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001481 BUG_ON(virq_from_irq(irq) != virq);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001482
1483 /* Get a new binding from Xen. */
1484 bind_virq.virq = virq;
1485 bind_virq.vcpu = cpu;
1486 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1487 &bind_virq) != 0)
1488 BUG();
1489 evtchn = bind_virq.port;
1490
1491 /* Record the new mapping. */
Ian Campbell3d4cfa32011-03-10 16:08:10 +00001492 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001493 bind_evtchn_to_cpu(evtchn, cpu);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001494 }
1495}
1496
1497static void restore_cpu_ipis(unsigned int cpu)
1498{
1499 struct evtchn_bind_ipi bind_ipi;
1500 int ipi, irq, evtchn;
1501
1502 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1503 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1504 continue;
1505
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001506 BUG_ON(ipi_from_irq(irq) != ipi);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001507
1508 /* Get a new binding from Xen. */
1509 bind_ipi.vcpu = cpu;
1510 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1511 &bind_ipi) != 0)
1512 BUG();
1513 evtchn = bind_ipi.port;
1514
1515 /* Record the new mapping. */
Ian Campbell3d4cfa32011-03-10 16:08:10 +00001516 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001517 bind_evtchn_to_cpu(evtchn, cpu);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001518 }
1519}
1520
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001521/* Clear an irq's pending state, in preparation for polling on it */
1522void xen_clear_irq_pending(int irq)
1523{
1524 int evtchn = evtchn_from_irq(irq);
1525
1526 if (VALID_EVTCHN(evtchn))
1527 clear_evtchn(evtchn);
1528}
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001529EXPORT_SYMBOL(xen_clear_irq_pending);
Jeremy Fitzhardinge168d2f42008-08-20 17:02:18 -07001530void xen_set_irq_pending(int irq)
1531{
1532 int evtchn = evtchn_from_irq(irq);
1533
1534 if (VALID_EVTCHN(evtchn))
1535 set_evtchn(evtchn);
1536}
1537
1538bool xen_test_irq_pending(int irq)
1539{
1540 int evtchn = evtchn_from_irq(irq);
1541 bool ret = false;
1542
1543 if (VALID_EVTCHN(evtchn))
1544 ret = test_evtchn(evtchn);
1545
1546 return ret;
1547}
1548
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001549/* Poll waiting for an irq to become pending with timeout. In the usual case,
1550 * the irq will be disabled so it won't deliver an interrupt. */
1551void xen_poll_irq_timeout(int irq, u64 timeout)
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001552{
1553 evtchn_port_t evtchn = evtchn_from_irq(irq);
1554
1555 if (VALID_EVTCHN(evtchn)) {
1556 struct sched_poll poll;
1557
1558 poll.nr_ports = 1;
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001559 poll.timeout = timeout;
Isaku Yamahataff3c5362008-10-14 17:50:44 -07001560 set_xen_guest_handle(poll.ports, &evtchn);
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001561
1562 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1563 BUG();
1564 }
1565}
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001566EXPORT_SYMBOL(xen_poll_irq_timeout);
1567/* Poll waiting for an irq to become pending. In the usual case, the
1568 * irq will be disabled so it won't deliver an interrupt. */
1569void xen_poll_irq(int irq)
1570{
1571 xen_poll_irq_timeout(irq, 0 /* no timeout */);
1572}
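/*
 * The pending/poll helpers above are meant for poll-style waiting: clear
 * any stale pending state, re-check the wait condition, then block in the
 * hypervisor until the other end sends an event.  A minimal hypothetical
 * sketch (the helper name and 'condition' callback are assumptions):
 */
#if 0	/* illustrative sketch */
static void example_wait_for_event(int wake_irq, bool (*condition)(void))
{
	while (!condition()) {
		/* Clear a stale event so the poll does not return at once... */
		xen_clear_irq_pending(wake_irq);

		/* ...but re-check in case the event raced with the clear. */
		if (condition())
			break;

		/* Blocks in Xen until wake_irq's channel becomes pending. */
		xen_poll_irq(wake_irq);
	}
}
#endif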
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001573
Konrad Rzeszutek Wilkc7c2c3a2010-11-08 14:26:36 -05001574/* Check whether the IRQ line is shared with other guests. */
1575int xen_test_irq_shared(int irq)
1576{
1577 struct irq_info *info = info_for_irq(irq);
1578 struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
1579
1580 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1581 return 0;
1582 return !(irq_status.flags & XENIRQSTAT_shared);
1583}
1584EXPORT_SYMBOL_GPL(xen_test_irq_shared);
1585
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001586void xen_irq_resume(void)
1587{
Ian Campbell6cb65372011-03-10 16:08:11 +00001588 unsigned int cpu, evtchn;
1589 struct irq_info *info;
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001590
1591 init_evtchn_cpu_bindings();
1592
1593 /* New event-channel space is not 'live' yet. */
1594 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1595 mask_evtchn(evtchn);
1596
1597 /* No IRQ <-> event-channel mappings. */
Ian Campbell6cb65372011-03-10 16:08:11 +00001598 list_for_each_entry(info, &xen_irq_list_head, list)
1599 info->evtchn = 0; /* zap event-channel binding */
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001600
1601 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1602 evtchn_to_irq[evtchn] = -1;
1603
1604 for_each_possible_cpu(cpu) {
1605 restore_cpu_virqs(cpu);
1606 restore_cpu_ipis(cpu);
1607 }
Ian Campbell69035912010-11-01 16:30:09 +00001608
Ian Campbell0a852262011-03-10 16:08:06 +00001609 restore_pirqs();
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001610}
1611
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001612static struct irq_chip xen_dynamic_chip __read_mostly = {
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001613 .name = "xen-dyn",
Jeremy Fitzhardinge54a353a2009-02-06 14:09:42 -08001614
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001615 .irq_disable = disable_dynirq,
1616 .irq_mask = disable_dynirq,
1617 .irq_unmask = enable_dynirq,
Jeremy Fitzhardinge54a353a2009-02-06 14:09:42 -08001618
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001619 .irq_ack = ack_dynirq,
1620 .irq_mask_ack = mask_ack_dynirq,
1621
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001622 .irq_set_affinity = set_affinity_irq,
1623 .irq_retrigger = retrigger_dynirq,
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001624};
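/*
 * Dynamic event-channel irqs are attached to this chip by the binding code
 * earlier in the file; roughly, each freshly allocated irq is given an
 * edge-type flow handler along these lines (a simplified sketch, not the
 * exact call site):
 */
#if 0	/* illustrative sketch */
static void example_attach_dynamic_chip(int irq)
{
	irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
				      handle_edge_irq, "event");
}
#endif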
1625
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001626static struct irq_chip xen_pirq_chip __read_mostly = {
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001627 .name = "xen-pirq",
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001628
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001629 .irq_startup = startup_pirq,
1630 .irq_shutdown = shutdown_pirq,
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001631 .irq_enable = enable_pirq,
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001632 .irq_disable = disable_pirq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001633
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001634 .irq_mask = disable_dynirq,
1635 .irq_unmask = enable_dynirq,
1636
1637 .irq_ack = eoi_pirq,
1638 .irq_eoi = eoi_pirq,
1639 .irq_mask_ack = mask_ack_pirq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001640
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001641 .irq_set_affinity = set_affinity_irq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001642
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001643 .irq_retrigger = retrigger_dynirq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001644};
1645
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001646static struct irq_chip xen_percpu_chip __read_mostly = {
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001647 .name = "xen-percpu",
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001648
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001649 .irq_disable = disable_dynirq,
1650 .irq_mask = disable_dynirq,
1651 .irq_unmask = enable_dynirq,
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001652
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001653 .irq_ack = ack_dynirq,
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001654};
1655
Sheng Yang38e20b02010-05-14 12:40:51 +01001656int xen_set_callback_via(uint64_t via)
1657{
1658 struct xen_hvm_param a;
1659 a.domid = DOMID_SELF;
1660 a.index = HVM_PARAM_CALLBACK_IRQ;
1661 a.value = via;
1662 return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
1663}
1664EXPORT_SYMBOL_GPL(xen_set_callback_via);
1665
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01001666#ifdef CONFIG_XEN_PVHVM
Sheng Yang38e20b02010-05-14 12:40:51 +01001667/* Vector callbacks are better than PCI interrupts to receive event
1668 * channel notifications because we can receive vector callbacks on any
1669 * vcpu and we don't need PCI support or APIC interactions. */
1670void xen_callback_vector(void)
1671{
1672 int rc;
1673 uint64_t callback_via;
1674 if (xen_have_vector_callback) {
1675 callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
1676 rc = xen_set_callback_via(callback_via);
1677 if (rc) {
1678 printk(KERN_ERR "Request for Xen HVM callback vector"
1679 " failed.\n");
1680 xen_have_vector_callback = 0;
1681 return;
1682 }
1683 printk(KERN_INFO "Xen HVM callback vector for event delivery is "
1684 "enabled\n");
1685 /* in the restore case the vector has already been allocated */
1686 if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
1687 alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
1688 }
1689}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01001690#else
1691void xen_callback_vector(void) {}
1692#endif
Sheng Yang38e20b02010-05-14 12:40:51 +01001693
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001694void __init xen_init_IRQ(void)
1695{
Stefano Stabellinie5fc7342010-12-01 14:51:44 +00001696 int i;
Mike Travisc7a35892009-01-10 21:58:11 -08001697
Jeremy Fitzhardingeb21ddbf2010-06-07 16:28:49 -04001698 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
1699 GFP_KERNEL);
Konrad Rzeszutek Wilk9d093e22011-09-29 13:31:21 -04001700 BUG_ON(!evtchn_to_irq);
Jeremy Fitzhardingeb21ddbf2010-06-07 16:28:49 -04001701 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1702 evtchn_to_irq[i] = -1;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001703
1704 init_evtchn_cpu_bindings();
1705
1706 /* No event channels are 'live' right now. */
1707 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1708 mask_evtchn(i);
1709
Sheng Yang38e20b02010-05-14 12:40:51 +01001710 if (xen_hvm_domain()) {
1711 xen_callback_vector();
1712 native_init_IRQ();
Stefano Stabellini3942b742010-06-24 17:50:18 +01001713 /* pci_xen_hvm_init must be called after native_init_IRQ so that
1714 * __acpi_register_gsi can point at the right function */
1715 pci_xen_hvm_init();
Sheng Yang38e20b02010-05-14 12:40:51 +01001716 } else {
1717 irq_ctx_init(smp_processor_id());
Jeremy Fitzhardinge38aa66f2010-09-02 14:51:39 +01001718 if (xen_initial_domain())
Konrad Rzeszutek Wilka0ee0562011-06-09 09:49:13 -04001719 pci_xen_initial_domain();
Sheng Yang38e20b02010-05-14 12:40:51 +01001720 }
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001721}
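/*
 * Once the subsystem is initialised, the usual consumer-facing entry point
 * is bind_evtchn_to_irqhandler() (defined earlier in this file), which maps
 * an event channel to an irq and installs a handler in one step.  A minimal
 * hypothetical frontend-style sketch:
 */
#if 0	/* illustrative sketch */
static irqreturn_t example_evtchn_interrupt(int irq, void *dev_id)
{
	/* ... consume work posted by the remote domain ... */
	return IRQ_HANDLED;
}

static int example_bind(unsigned int evtchn, void *dev)
{
	int irq;

	irq = bind_evtchn_to_irqhandler(evtchn, example_evtchn_interrupt,
					0, "example", dev);
	if (irq < 0)
		return irq;

	/* Kick the remote end over the same channel. */
	notify_remote_via_irq(irq);
	return irq;
}
#endif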