/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	struct list_head list;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   unsigned short vector,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.vector = vector;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

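/*
 * Record that event channel 'chn' is now delivered to 'cpu': update the
 * irq affinity, move the channel's bit between the per-cpu masks and
 * note the new cpu in the irq_info.
 */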
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

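/*
 * Unmask an event channel. A port bound to another vcpu needs an
 * EVTCHNOP_unmask hypercall; for a local port the mask bit is cleared
 * directly and any pending event is re-flagged so it is not lost.
 */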
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

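/* Allocate the irq_info metadata for 'irq' and add it to the global list. */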
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

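/*
 * Ask Xen whether this PIRQ needs an explicit EOI and cache the answer
 * in the irq_info flags for pirq_needs_eoi().
 */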
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

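/* Look up an existing PIRQ binding for 'gsi'; returns the irq or -1. */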
static int find_irq_by_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	mutex_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name,
			     domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	mutex_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			printk(KERN_INFO "domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}


int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
					     ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
					     ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
					     ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))

/*
 * Search the CPU's pending event bitmasks. For each one found, map
 * the event number to an irq and feed it into the interrupt handling
 * path.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			unsigned long pending_bits;
			unsigned long words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = __ffs(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}

			do {
				unsigned long bits;
				int port, irq;
				struct irq_desc *desc;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = __ffs(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_LONG) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_LONG);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_word_idx twice; all others once. */
1238 if ((word_idx != start_word_idx) || (i != 0))
Scott Rixnerab7f8632011-03-03 09:30:08 +00001239 pending_words &= ~(1UL << word_idx);
Keir Fraserada68142011-03-03 10:01:11 +00001240
1241 word_idx = (word_idx + 1) % BITS_PER_LONG;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001242 }
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001243
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001244 BUG_ON(!irqs_disabled());
1245
Christoph Lameter780f36d2010-12-06 11:16:29 -06001246 count = __this_cpu_read(xed_nesting_count);
1247 __this_cpu_write(xed_nesting_count, 0);
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001248 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001249
1250out:
Jeremy Fitzhardinge3445a8f2009-02-06 14:09:46 -08001251
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001252 put_cpu();
1253}
1254
Sheng Yang38e20b02010-05-14 12:40:51 +01001255void xen_evtchn_do_upcall(struct pt_regs *regs)
1256{
1257 struct pt_regs *old_regs = set_irq_regs(regs);
1258
1259 exit_idle();
1260 irq_enter();
1261
1262 __xen_evtchn_do_upcall();
1263
1264 irq_exit();
1265 set_irq_regs(old_regs);
1266}
1267
void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	xen_irq_info_evtchn_init(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

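/*
 * irq_set_affinity callback for the Xen irq_chips: an event channel can only
 * be targeted at a single vcpu, so route it to the first cpu in the mask.
 */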
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

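/*
 * Re-inject an event on the irq's event channel: mark the channel pending
 * and, if it was not already masked, unmask it so the event is delivered.
 */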
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

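/*
 * irq_chip callbacks for event-channel backed irqs: enabling, disabling and
 * acking map directly onto unmasking, masking and clearing the underlying
 * event channel.
 */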
static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

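/*
 * After restore, re-map every GSI-based PIRQ with PHYSDEVOP_map_pirq and
 * start it up again; passed-through devices cannot be restored this way.
 */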
static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

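/* Rebind this cpu's VIRQs to fresh event channels after a restore. */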
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

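/* Rebind this cpu's IPIs to fresh event channels after a restore. */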
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
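
/*
 * Typical use (a sketch, not code from this file): a caller that wants to
 * block until an event arrives clears the pending state, re-checks its own
 * condition, and only then polls, e.g. in a paravirtualised lock slow path:
 *
 *	xen_clear_irq_pending(irq);
 *	if (lock_still_held(lock))		// hypothetical re-check helper
 *		xen_poll_irq(irq);		// returns once the channel is kicked
 */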

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

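/*
 * Called after a save/restore or migration: every event channel binding is
 * stale, so mask everything, forget the old irq<->evtchn mappings, and
 * rebind the per-cpu VIRQs/IPIs and the GSI PIRQs from scratch.
 */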
void xen_irq_resume(void)
{
	unsigned int cpu, evtchn;
	struct irq_info *info;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

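/*
 * Three irq_chip flavours back Xen irqs: xen_dynamic_chip for inter-domain
 * event channels, xen_pirq_chip for hardware (PIRQ) interrupts, and
 * xen_percpu_chip for per-cpu VIRQs and IPIs.
 */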
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

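/*
 * Tell the hypervisor how to deliver event-channel upcalls to an HVM guest
 * by setting HVM_PARAM_CALLBACK_IRQ (a vector or a legacy interrupt).
 */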
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

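/*
 * Boot-time setup: allocate the evtchn-to-irq table, mask every event
 * channel, and pick the delivery mechanism (callback vector for HVM guests,
 * the normal upcall path for PV domains).
 */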
void __init xen_init_IRQ(void)
{
	int i;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
}