/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - physical IRQ, GSI, flags, and owner domain
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	int refcnt;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

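/*
 * Events in word @idx that are pending, not globally masked, and routed
 * to @cpu, i.e. the events this CPU should actually service.
 */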
static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
	set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

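/*
 * Allocate and attach the Xen-specific bookkeeping (struct irq_info) for a
 * newly allocated irq and add it to the global xen_irq_list_head list.
 */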
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

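/* Ask Xen whether this PIRQ needs an explicit EOI and cache the answer in the flags. */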
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

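/*
 * Bind a PIRQ to an event channel the first time the irq is started up,
 * then unmask and EOI it so further interrupts can be delivered.
 */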
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			pr_info("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

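/* irq_chip callbacks used for PIRQs (xen_pirq_chip). */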
static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

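/* Find the Linux irq already bound to @gsi, or -1 if there is none. */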
int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

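/*
 * MSI support: a free PIRQ is requested from Xen and then bound to a
 * dynamically allocated Linux irq.
 */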
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, const char *name, domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	mutex_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return ret;
}
#endif

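/*
 * Release an irq; in the initial domain the underlying PIRQ is also
 * unmapped from Xen.
 */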
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

 out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

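/*
 * Scan all event channels for the port that is already bound to @virq on
 * @cpu; used when EVTCHNOP_bind_virq reports the VIRQ is already bound.
 */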
static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	mutex_lock(&irq_mapping_update_lock);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			goto done;
	}

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

 done:
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= NR_EVENT_CHANNELS)
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
 done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

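/*
 * Send an IPI to @cpu via its bound event channel (or, on x86, as a real
 * NMI for XEN_NMI_VECTOR).
 */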
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

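/*
 * Debug handler: dump each vcpu's upcall state plus the shared pending and
 * mask bitmaps, the per-cpu binding mask, and the list of pending events.
 */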
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001231irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
1232{
1233 struct shared_info *sh = HYPERVISOR_shared_info;
1234 int cpu = smp_processor_id();
Ian Campbellc81611c2013-02-20 11:48:06 +00001235 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001236 int i;
1237 unsigned long flags;
1238 static DEFINE_SPINLOCK(debug_lock);
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001239 struct vcpu_info *v;
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001240
1241 spin_lock_irqsave(&debug_lock, flags);
1242
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001243 printk("\nvcpu %d\n ", cpu);
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001244
1245 for_each_online_cpu(i) {
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001246 int pending;
1247 v = per_cpu(xen_vcpu, i);
1248 pending = (get_irq_regs() && i == cpu)
1249 ? xen_irqs_disabled(get_irq_regs())
1250 : v->evtchn_upcall_mask;
Ian Campbellc81611c2013-02-20 11:48:06 +00001251 printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001252 pending, v->evtchn_upcall_pending,
1253 (int)(sizeof(v->evtchn_pending_sel)*2),
1254 v->evtchn_pending_sel);
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001255 }
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001256 v = per_cpu(xen_vcpu, cpu);
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001257
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001258 printk("\npending:\n ");
1259 for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
Ian Campbellc81611c2013-02-20 11:48:06 +00001260 printk("%0*"PRI_xen_ulong"%s",
1261 (int)sizeof(sh->evtchn_pending[0])*2,
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001262 sh->evtchn_pending[i],
1263 i % 8 == 0 ? "\n " : " ");
1264 printk("\nglobal mask:\n ");
1265 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
Ian Campbellc81611c2013-02-20 11:48:06 +00001266 printk("%0*"PRI_xen_ulong"%s",
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001267 (int)(sizeof(sh->evtchn_mask[0])*2),
1268 sh->evtchn_mask[i],
1269 i % 8 == 0 ? "\n " : " ");
1270
1271 printk("\nglobally unmasked:\n ");
1272 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
Ian Campbellc81611c2013-02-20 11:48:06 +00001273 printk("%0*"PRI_xen_ulong"%s",
1274 (int)(sizeof(sh->evtchn_mask[0])*2),
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001275 sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
1276 i % 8 == 0 ? "\n " : " ");
1277
1278 printk("\nlocal cpu%d mask:\n ", cpu);
Ian Campbellc81611c2013-02-20 11:48:06 +00001279 for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
1280 printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001281 cpu_evtchn[i],
1282 i % 8 == 0 ? "\n " : " ");
1283
1284 printk("\nlocally unmasked:\n ");
1285 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
Ian Campbellc81611c2013-02-20 11:48:06 +00001286 xen_ulong_t pending = sh->evtchn_pending[i]
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001287 & ~sh->evtchn_mask[i]
1288 & cpu_evtchn[i];
Ian Campbellc81611c2013-02-20 11:48:06 +00001289 printk("%0*"PRI_xen_ulong"%s",
1290 (int)(sizeof(sh->evtchn_mask[0])*2),
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001291 pending, i % 8 == 0 ? "\n " : " ");
1292 }
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001293
1294 printk("\npending list:\n");
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001295 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
Ian Campbellc81611c2013-02-20 11:48:06 +00001296 if (sync_test_bit(i, BM(sh->evtchn_pending))) {
1297 int word_idx = i / BITS_PER_EVTCHN_WORD;
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001298 printk(" %d: event %d -> irq %d%s%s%s\n",
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001299 cpu_from_evtchn(i), i,
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001300 evtchn_to_irq[i],
Ian Campbellc81611c2013-02-20 11:48:06 +00001301 sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001302 ? "" : " l2-clear",
Ian Campbellc81611c2013-02-20 11:48:06 +00001303 !sync_test_bit(i, BM(sh->evtchn_mask))
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001304 ? "" : " globally-masked",
Ian Campbellc81611c2013-02-20 11:48:06 +00001305 sync_test_bit(i, BM(cpu_evtchn))
Ian Campbellcb52e6d2010-10-15 11:52:46 +01001306 ? "" : " locally-masked");
Jeremy Fitzhardingeee523ca2008-03-17 16:37:18 -07001307 }
1308 }
1309
1310 spin_unlock_irqrestore(&debug_lock, flags);
1311
1312 return IRQ_HANDLED;
1313}
1314
Tejun Heo245b2e72009-06-24 15:13:48 +09001315static DEFINE_PER_CPU(unsigned, xed_nesting_count);
Keir Fraserada68142011-03-03 10:01:11 +00001316static DEFINE_PER_CPU(unsigned int, current_word_idx);
1317static DEFINE_PER_CPU(unsigned int, current_bit_idx);
Tejun Heo245b2e72009-06-24 15:13:48 +09001318
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001319/*
Scott Rixnerab7f8632011-03-03 09:30:08 +00001320 * Mask out the i least significant bits of w
1321 */
Ian Campbellc81611c2013-02-20 11:48:06 +00001322#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
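/*
 * Worked example: with 8-bit words, MASK_LSBS(0b10110110, 3) is
 * 0b10110110 & 0b11111000 == 0b10110000, i.e. the three least
 * significant bits are cleared and everything else is preserved.  The
 * scan loop below uses this to skip bits and words it has already
 * processed in the current pass.
 */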
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001323
1324/*
 1325 * Search the CPU's pending event bitmasks. For each one found, map
1326 * the event number to an irq, and feed it into do_IRQ() for
1327 * handling.
1328 *
1329 * Xen uses a two-level bitmap to speed searching. The first level is
1330 * a bitset of words which contain pending event bits. The second
1331 * level is a bitset of pending events themselves.
1332 */
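/*
 * Worked example (assuming 64-bit event words, as on x86-64): port 130
 * decomposes into word_idx = 130 / 64 = 2 and bit_idx = 130 % 64 = 2,
 * so bit 2 of evtchn_pending_sel says word 2 of evtchn_pending[] needs
 * scanning, and bit 2 of that word is the event itself.  The per-cpu
 * current_word_idx/current_bit_idx cursors make each pass resume just
 * after the last port handled, so a busy low-numbered port cannot
 * starve higher-numbered ones.
 */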
Sheng Yang38e20b02010-05-14 12:40:51 +01001333static void __xen_evtchn_do_upcall(void)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001334{
Keir Fraser24b51c22011-03-03 11:06:28 +00001335 int start_word_idx, start_bit_idx;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001336 int word_idx, bit_idx;
Keir Fraserbee980d2013-03-28 10:03:36 -04001337 int i, irq;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001338 int cpu = get_cpu();
1339 struct shared_info *s = HYPERVISOR_shared_info;
Christoph Lameter780f36d2010-12-06 11:16:29 -06001340 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
Ruslan Pisarev088c05a2011-07-26 14:16:13 +03001341 unsigned count;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001342
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001343 do {
Ian Campbellc81611c2013-02-20 11:48:06 +00001344 xen_ulong_t pending_words;
Keir Fraserbee980d2013-03-28 10:03:36 -04001345 xen_ulong_t pending_bits;
1346 struct irq_desc *desc;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001347
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001348 vcpu_info->evtchn_upcall_pending = 0;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001349
Christoph Lameterb2e4ae62010-12-06 11:40:07 -06001350 if (__this_cpu_inc_return(xed_nesting_count) - 1)
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001351 goto out;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001352
Ian Campbellc81611c2013-02-20 11:48:06 +00001353 /*
1354 * Master flag must be cleared /before/ clearing
1355 * selector flag. xchg_xen_ulong must contain an
1356 * appropriate barrier.
1357 */
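		/*
		 * The VIRQ_TIMER check below is a fast path: a pending
		 * timer event is handled before the full two-level scan,
		 * so timer latency does not grow with the number of other
		 * ports that happen to be pending.
		 */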
Keir Fraserbee980d2013-03-28 10:03:36 -04001358 if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
1359 int evtchn = evtchn_from_irq(irq);
1360 word_idx = evtchn / BITS_PER_LONG;
1361 pending_bits = evtchn % BITS_PER_LONG;
1362 if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
1363 desc = irq_to_desc(irq);
1364 if (desc)
1365 generic_handle_irq_desc(irq, desc);
1366 }
1367 }
1368
Ian Campbellc81611c2013-02-20 11:48:06 +00001369 pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001370
Keir Fraser24b51c22011-03-03 11:06:28 +00001371 start_word_idx = __this_cpu_read(current_word_idx);
1372 start_bit_idx = __this_cpu_read(current_bit_idx);
Scott Rixnerab7f8632011-03-03 09:30:08 +00001373
Keir Fraser24b51c22011-03-03 11:06:28 +00001374 word_idx = start_word_idx;
1375
1376 for (i = 0; pending_words != 0; i++) {
Ian Campbellc81611c2013-02-20 11:48:06 +00001377 xen_ulong_t words;
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001378
Scott Rixnerab7f8632011-03-03 09:30:08 +00001379 words = MASK_LSBS(pending_words, word_idx);
1380
1381 /*
Keir Fraserada68142011-03-03 10:01:11 +00001382 * If we masked out all events, wrap to beginning.
Scott Rixnerab7f8632011-03-03 09:30:08 +00001383 */
1384 if (words == 0) {
Keir Fraserada68142011-03-03 10:01:11 +00001385 word_idx = 0;
1386 bit_idx = 0;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001387 continue;
1388 }
Ian Campbellc81611c2013-02-20 11:48:06 +00001389 word_idx = EVTCHN_FIRST_BIT(words);
Scott Rixnerab7f8632011-03-03 09:30:08 +00001390
Keir Fraser24b51c22011-03-03 11:06:28 +00001391 pending_bits = active_evtchns(cpu, s, word_idx);
1392 bit_idx = 0; /* usually scan entire word from start */
David Vrabel3ef02962013-08-15 13:21:05 +01001393 /*
1394 * We scan the starting word in two parts.
1395 *
1396 * 1st time: start in the middle, scanning the
1397 * upper bits.
1398 *
1399 * 2nd time: scan the whole word (not just the
1400 * parts skipped in the first pass) -- if an
1401 * event in the previously scanned bits is
1402 * pending again it would just be scanned on
1403 * the next loop anyway.
1404 */
Keir Fraser24b51c22011-03-03 11:06:28 +00001405 if (word_idx == start_word_idx) {
Keir Fraser24b51c22011-03-03 11:06:28 +00001406 if (i == 0)
Keir Fraser24b51c22011-03-03 11:06:28 +00001407 bit_idx = start_bit_idx;
Keir Fraser24b51c22011-03-03 11:06:28 +00001408 }
1409
Scott Rixnerab7f8632011-03-03 09:30:08 +00001410 do {
Ian Campbellc81611c2013-02-20 11:48:06 +00001411 xen_ulong_t bits;
Keir Fraserbee980d2013-03-28 10:03:36 -04001412 int port;
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001413
Scott Rixnerab7f8632011-03-03 09:30:08 +00001414 bits = MASK_LSBS(pending_bits, bit_idx);
1415
1416 /* If we masked out all events, move on. */
Keir Fraserada68142011-03-03 10:01:11 +00001417 if (bits == 0)
Scott Rixnerab7f8632011-03-03 09:30:08 +00001418 break;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001419
Ian Campbellc81611c2013-02-20 11:48:06 +00001420 bit_idx = EVTCHN_FIRST_BIT(bits);
Scott Rixnerab7f8632011-03-03 09:30:08 +00001421
1422 /* Process port. */
Ian Campbellc81611c2013-02-20 11:48:06 +00001423 port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001424 irq = evtchn_to_irq[port];
1425
Eric W. Biedermanca4dbc62010-02-17 18:49:54 -08001426 if (irq != -1) {
1427 desc = irq_to_desc(irq);
1428 if (desc)
1429 generic_handle_irq_desc(irq, desc);
1430 }
Scott Rixnerab7f8632011-03-03 09:30:08 +00001431
Ian Campbellc81611c2013-02-20 11:48:06 +00001432 bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
Keir Fraserada68142011-03-03 10:01:11 +00001433
1434 /* Next caller starts at last processed + 1 */
1435 __this_cpu_write(current_word_idx,
1436 bit_idx ? word_idx :
Ian Campbellc81611c2013-02-20 11:48:06 +00001437 (word_idx+1) % BITS_PER_EVTCHN_WORD);
Keir Fraserada68142011-03-03 10:01:11 +00001438 __this_cpu_write(current_bit_idx, bit_idx);
1439 } while (bit_idx != 0);
Scott Rixnerab7f8632011-03-03 09:30:08 +00001440
Keir Fraser24b51c22011-03-03 11:06:28 +00001441			/* Scan the start_word_idx word twice; all others once. */
1442 if ((word_idx != start_word_idx) || (i != 0))
Scott Rixnerab7f8632011-03-03 09:30:08 +00001443 pending_words &= ~(1UL << word_idx);
Keir Fraserada68142011-03-03 10:01:11 +00001444
Ian Campbellc81611c2013-02-20 11:48:06 +00001445 word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001446 }
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001447
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001448 BUG_ON(!irqs_disabled());
1449
Christoph Lameter780f36d2010-12-06 11:16:29 -06001450 count = __this_cpu_read(xed_nesting_count);
1451 __this_cpu_write(xed_nesting_count, 0);
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001452 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001453
1454out:
Jeremy Fitzhardinge3445a8f2009-02-06 14:09:46 -08001455
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001456 put_cpu();
1457}
1458
Sheng Yang38e20b02010-05-14 12:40:51 +01001459void xen_evtchn_do_upcall(struct pt_regs *regs)
1460{
1461 struct pt_regs *old_regs = set_irq_regs(regs);
1462
Mojiong Qiu772aebc2012-11-06 16:08:15 +08001463 irq_enter();
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001464#ifdef CONFIG_X86
Sheng Yang38e20b02010-05-14 12:40:51 +01001465 exit_idle();
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001466#endif
Sheng Yang38e20b02010-05-14 12:40:51 +01001467
1468 __xen_evtchn_do_upcall();
1469
1470 irq_exit();
1471 set_irq_regs(old_regs);
1472}
1473
1474void xen_hvm_evtchn_do_upcall(void)
1475{
1476 __xen_evtchn_do_upcall();
1477}
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001478EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
Sheng Yang38e20b02010-05-14 12:40:51 +01001479
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001480/* Rebind a new event channel to an existing irq. */
1481void rebind_evtchn_irq(int evtchn, int irq)
1482{
Jeremy Fitzhardinged77bbd42009-02-06 14:09:45 -08001483 struct irq_info *info = info_for_irq(irq);
1484
Konrad Rzeszutek Wilk94032c52013-04-16 10:55:18 -04001485 if (WARN_ON(!info))
1486 return;
1487
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001488 /* Make sure the irq is masked, since the new event channel
1489 will also be masked. */
1490 disable_irq(irq);
1491
Konrad Rzeszutek Wilk77365942011-09-14 05:10:00 -04001492 mutex_lock(&irq_mapping_update_lock);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001493
1494 /* After resume the irq<->evtchn mappings are all cleared out */
1495 BUG_ON(evtchn_to_irq[evtchn] != -1);
1496 /* Expect irq to have been bound before,
Jeremy Fitzhardinged77bbd42009-02-06 14:09:45 -08001497 so there should be a proper type */
1498 BUG_ON(info->type == IRQT_UNBOUND);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001499
Ian Campbell9158c352011-03-10 16:08:09 +00001500 xen_irq_info_evtchn_init(irq, evtchn);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001501
Konrad Rzeszutek Wilk77365942011-09-14 05:10:00 -04001502 mutex_unlock(&irq_mapping_update_lock);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001503
1504 /* new event channels are always bound to cpu 0 */
Rusty Russell0de26522008-12-13 21:20:26 +10301505 irq_set_affinity(irq, cpumask_of(0));
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001506
1507 /* Unmask the event channel. */
1508 enable_irq(irq);
1509}
1510
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001511/* Rebind an evtchn so that it gets delivered to a specific cpu */
Yinghai Lud5dedd42009-04-27 17:59:21 -07001512static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001513{
David Vrabel4704fe42013-08-15 13:21:07 +01001514 struct shared_info *s = HYPERVISOR_shared_info;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001515 struct evtchn_bind_vcpu bind_vcpu;
1516 int evtchn = evtchn_from_irq(irq);
David Vrabel4704fe42013-08-15 13:21:07 +01001517 int masked;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001518
Ian Campbellbe494722011-03-10 16:08:02 +00001519 if (!VALID_EVTCHN(evtchn))
1520 return -1;
1521
1522 /*
1523 * Events delivered via platform PCI interrupts are always
1524 * routed to vcpu 0 and hence cannot be rebound.
1525 */
1526 if (xen_hvm_domain() && !xen_have_vector_callback)
Yinghai Lud5dedd42009-04-27 17:59:21 -07001527 return -1;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001528
1529 /* Send future instances of this interrupt to other vcpu. */
1530 bind_vcpu.port = evtchn;
1531 bind_vcpu.vcpu = tcpu;
1532
1533 /*
David Vrabel4704fe42013-08-15 13:21:07 +01001534 * Mask the event while changing the VCPU binding to prevent
1535 * it being delivered on an unexpected VCPU.
1536 */
1537 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1538
1539 /*
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001540 * If this fails, it usually just indicates that we're dealing with a
1541 * virq or IPI channel, which don't actually need to be rebound. Ignore
1542 * it, but don't do the xenlinux-level rebind in that case.
1543 */
1544 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1545 bind_evtchn_to_cpu(evtchn, tcpu);
Yinghai Lud5dedd42009-04-27 17:59:21 -07001546
David Vrabel4704fe42013-08-15 13:21:07 +01001547 if (!masked)
1548 unmask_evtchn(evtchn);
1549
Yinghai Lud5dedd42009-04-27 17:59:21 -07001550 return 0;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001551}
1552
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001553static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1554 bool force)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001555{
Rusty Russell0de26522008-12-13 21:20:26 +10301556 unsigned tcpu = cpumask_first(dest);
Yinghai Lud5dedd42009-04-27 17:59:21 -07001557
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001558 return rebind_irq_to_cpu(data->irq, tcpu);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001559}
1560
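/*
 * Retrigger idiom used below (and again in retrigger_dynirq()):
 * atomically set the mask bit, remembering whether it was already set,
 * mark the event pending, then unmask only if we were the ones who
 * masked it.  unmask_evtchn() takes care of noticing the pending bit,
 * so the event is (re)delivered without racing a concurrent mask.
 */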
Isaku Yamahata642e0c82008-04-02 10:53:57 -07001561int resend_irq_on_evtchn(unsigned int irq)
1562{
1563 int masked, evtchn = evtchn_from_irq(irq);
1564 struct shared_info *s = HYPERVISOR_shared_info;
1565
1566 if (!VALID_EVTCHN(evtchn))
1567 return 1;
1568
Ian Campbellc81611c2013-02-20 11:48:06 +00001569 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1570 sync_set_bit(evtchn, BM(s->evtchn_pending));
Isaku Yamahata642e0c82008-04-02 10:53:57 -07001571 if (!masked)
1572 unmask_evtchn(evtchn);
1573
1574 return 1;
1575}
1576
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001577static void enable_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001578{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001579 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001580
1581 if (VALID_EVTCHN(evtchn))
1582 unmask_evtchn(evtchn);
1583}
1584
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001585static void disable_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001586{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001587 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001588
1589 if (VALID_EVTCHN(evtchn))
1590 mask_evtchn(evtchn);
1591}
1592
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001593static void ack_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001594{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001595 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001596
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001597 irq_move_irq(data);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001598
1599 if (VALID_EVTCHN(evtchn))
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001600 clear_evtchn(evtchn);
1601}
1602
1603static void mask_ack_dynirq(struct irq_data *data)
1604{
1605 disable_dynirq(data);
1606 ack_dynirq(data);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001607}
1608
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001609static int retrigger_dynirq(struct irq_data *data)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001610{
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001611 int evtchn = evtchn_from_irq(data->irq);
Jeremy Fitzhardingeee8fa1c2008-03-17 16:37:19 -07001612 struct shared_info *sh = HYPERVISOR_shared_info;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001613 int ret = 0;
1614
1615 if (VALID_EVTCHN(evtchn)) {
Jeremy Fitzhardingeee8fa1c2008-03-17 16:37:19 -07001616 int masked;
1617
Ian Campbellc81611c2013-02-20 11:48:06 +00001618 masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
1619 sync_set_bit(evtchn, BM(sh->evtchn_pending));
Jeremy Fitzhardingeee8fa1c2008-03-17 16:37:19 -07001620 if (!masked)
1621 unmask_evtchn(evtchn);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001622 ret = 1;
1623 }
1624
1625 return ret;
1626}
1627
Ian Campbell0a852262011-03-10 16:08:06 +00001628static void restore_pirqs(void)
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001629{
1630 int pirq, rc, irq, gsi;
1631 struct physdev_map_pirq map_irq;
Ian Campbell69c358c2011-03-10 16:08:13 +00001632 struct irq_info *info;
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001633
Ian Campbell69c358c2011-03-10 16:08:13 +00001634 list_for_each_entry(info, &xen_irq_list_head, list) {
1635 if (info->type != IRQT_PIRQ)
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001636 continue;
1637
Ian Campbell69c358c2011-03-10 16:08:13 +00001638 pirq = info->u.pirq.pirq;
1639 gsi = info->u.pirq.gsi;
1640 irq = info->irq;
1641
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001642		/* Save/restore of PT (passthrough) devices doesn't work, so at this
 1643		 * point the only devices present are GSI-based emulated devices. */
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001644 if (!gsi)
1645 continue;
1646
1647 map_irq.domid = DOMID_SELF;
1648 map_irq.type = MAP_PIRQ_TYPE_GSI;
1649 map_irq.index = gsi;
1650 map_irq.pirq = pirq;
1651
1652 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1653 if (rc) {
Joe Perches283c0972013-06-28 03:21:41 -07001654 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1655 gsi, irq, pirq, rc);
Ian Campbell9158c352011-03-10 16:08:09 +00001656 xen_free_irq(irq);
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001657 continue;
1658 }
1659
1660 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1661
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001662 __startup_pirq(irq);
Stefano Stabellini9a069c32010-12-01 14:51:44 +00001663 }
1664}
1665
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001666static void restore_cpu_virqs(unsigned int cpu)
1667{
1668 struct evtchn_bind_virq bind_virq;
1669 int virq, irq, evtchn;
1670
1671 for (virq = 0; virq < NR_VIRQS; virq++) {
1672 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1673 continue;
1674
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001675 BUG_ON(virq_from_irq(irq) != virq);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001676
1677 /* Get a new binding from Xen. */
1678 bind_virq.virq = virq;
1679 bind_virq.vcpu = cpu;
1680 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1681 &bind_virq) != 0)
1682 BUG();
1683 evtchn = bind_virq.port;
1684
1685 /* Record the new mapping. */
Ian Campbell3d4cfa32011-03-10 16:08:10 +00001686 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001687 bind_evtchn_to_cpu(evtchn, cpu);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001688 }
1689}
1690
1691static void restore_cpu_ipis(unsigned int cpu)
1692{
1693 struct evtchn_bind_ipi bind_ipi;
1694 int ipi, irq, evtchn;
1695
1696 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1697 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1698 continue;
1699
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001700 BUG_ON(ipi_from_irq(irq) != ipi);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001701
1702 /* Get a new binding from Xen. */
1703 bind_ipi.vcpu = cpu;
1704 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1705 &bind_ipi) != 0)
1706 BUG();
1707 evtchn = bind_ipi.port;
1708
1709 /* Record the new mapping. */
Ian Campbell3d4cfa32011-03-10 16:08:10 +00001710 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001711 bind_evtchn_to_cpu(evtchn, cpu);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001712 }
1713}
1714
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001715/* Clear an irq's pending state, in preparation for polling on it */
1716void xen_clear_irq_pending(int irq)
1717{
1718 int evtchn = evtchn_from_irq(irq);
1719
1720 if (VALID_EVTCHN(evtchn))
1721 clear_evtchn(evtchn);
1722}
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001723EXPORT_SYMBOL(xen_clear_irq_pending);
Jeremy Fitzhardinge168d2f42008-08-20 17:02:18 -07001724void xen_set_irq_pending(int irq)
1725{
1726 int evtchn = evtchn_from_irq(irq);
1727
1728 if (VALID_EVTCHN(evtchn))
1729 set_evtchn(evtchn);
1730}
1731
1732bool xen_test_irq_pending(int irq)
1733{
1734 int evtchn = evtchn_from_irq(irq);
1735 bool ret = false;
1736
1737 if (VALID_EVTCHN(evtchn))
1738 ret = test_evtchn(evtchn);
1739
1740 return ret;
1741}
1742
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001743/* Poll waiting for an irq to become pending, with a timeout. In the usual case,
1744 * the irq will be disabled so it won't deliver an interrupt. */
1745void xen_poll_irq_timeout(int irq, u64 timeout)
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001746{
1747 evtchn_port_t evtchn = evtchn_from_irq(irq);
1748
1749 if (VALID_EVTCHN(evtchn)) {
1750 struct sched_poll poll;
1751
1752 poll.nr_ports = 1;
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001753 poll.timeout = timeout;
Isaku Yamahataff3c5362008-10-14 17:50:44 -07001754 set_xen_guest_handle(poll.ports, &evtchn);
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001755
1756 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1757 BUG();
1758 }
1759}
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001760EXPORT_SYMBOL(xen_poll_irq_timeout);
1761/* Poll waiting for an irq to become pending. In the usual case, the
1762 * irq will be disabled so it won't deliver an interrupt. */
1763void xen_poll_irq(int irq)
1764{
1765 xen_poll_irq_timeout(irq, 0 /* no timeout */);
1766}
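/*
 * Usage sketch (illustrative; "lock_irq" is a stand-in for whatever
 * irq the caller has bound): the PV spinlock code waits for a kick on
 * a per-cpu event channel roughly like this:
 *
 *	xen_clear_irq_pending(lock_irq);
 *	// ... publish that this cpu is waiting ...
 *	xen_poll_irq(lock_irq);		// block in Xen until it fires
 *	if (xen_test_irq_pending(lock_irq))
 *		// ... woken by an explicit kick ...
 *
 * The irq is normally kept disabled, so polling consumes the event
 * without ever running an interrupt handler for it.
 */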
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001767
Konrad Rzeszutek Wilkc7c2c3a2010-11-08 14:26:36 -05001768/* Check whether the IRQ line is shared with other guests. */
1769int xen_test_irq_shared(int irq)
1770{
1771 struct irq_info *info = info_for_irq(irq);
Konrad Rzeszutek Wilk94032c52013-04-16 10:55:18 -04001772 struct physdev_irq_status_query irq_status;
1773
1774 if (WARN_ON(!info))
1775 return -ENOENT;
1776
1777 irq_status.irq = info->u.pirq.pirq;
Konrad Rzeszutek Wilkc7c2c3a2010-11-08 14:26:36 -05001778
1779 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1780 return 0;
1781 return !(irq_status.flags & XENIRQSTAT_shared);
1782}
1783EXPORT_SYMBOL_GPL(xen_test_irq_shared);
1784
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001785void xen_irq_resume(void)
1786{
Ian Campbell6cb65372011-03-10 16:08:11 +00001787 unsigned int cpu, evtchn;
1788 struct irq_info *info;
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001789
1790 init_evtchn_cpu_bindings();
1791
1792 /* New event-channel space is not 'live' yet. */
1793 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1794 mask_evtchn(evtchn);
1795
1796 /* No IRQ <-> event-channel mappings. */
Ian Campbell6cb65372011-03-10 16:08:11 +00001797 list_for_each_entry(info, &xen_irq_list_head, list)
1798 info->evtchn = 0; /* zap event-channel binding */
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001799
1800 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1801 evtchn_to_irq[evtchn] = -1;
1802
1803 for_each_possible_cpu(cpu) {
1804 restore_cpu_virqs(cpu);
1805 restore_cpu_ipis(cpu);
1806 }
Ian Campbell69035912010-11-01 16:30:09 +00001807
Ian Campbell0a852262011-03-10 16:08:06 +00001808 restore_pirqs();
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001809}
1810
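/*
 * Summary of the three irq_chip flavours defined below:
 * xen_dynamic_chip drives ordinary interdomain event channels;
 * xen_pirq_chip drives physical interrupts that Xen forwards as event
 * channels and therefore also needs startup/shutdown and EOI hooks;
 * xen_percpu_chip is used for per-cpu bindings (VIRQs and IPIs), which
 * never migrate between vcpus and so have no affinity callback.
 */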
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001811static struct irq_chip xen_dynamic_chip __read_mostly = {
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001812 .name = "xen-dyn",
Jeremy Fitzhardinge54a353a2009-02-06 14:09:42 -08001813
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001814 .irq_disable = disable_dynirq,
1815 .irq_mask = disable_dynirq,
1816 .irq_unmask = enable_dynirq,
Jeremy Fitzhardinge54a353a2009-02-06 14:09:42 -08001817
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001818 .irq_ack = ack_dynirq,
1819 .irq_mask_ack = mask_ack_dynirq,
1820
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001821 .irq_set_affinity = set_affinity_irq,
1822 .irq_retrigger = retrigger_dynirq,
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001823};
1824
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001825static struct irq_chip xen_pirq_chip __read_mostly = {
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001826 .name = "xen-pirq",
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001827
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001828 .irq_startup = startup_pirq,
1829 .irq_shutdown = shutdown_pirq,
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001830 .irq_enable = enable_pirq,
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001831 .irq_disable = disable_pirq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001832
Stefano Stabellini7e186bd2011-05-06 12:27:50 +01001833 .irq_mask = disable_dynirq,
1834 .irq_unmask = enable_dynirq,
1835
1836 .irq_ack = eoi_pirq,
1837 .irq_eoi = eoi_pirq,
1838 .irq_mask_ack = mask_ack_pirq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001839
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001840 .irq_set_affinity = set_affinity_irq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001841
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001842 .irq_retrigger = retrigger_dynirq,
Jeremy Fitzhardinged46a78b2010-10-01 12:20:09 -04001843};
1844
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001845static struct irq_chip xen_percpu_chip __read_mostly = {
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001846 .name = "xen-percpu",
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001847
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001848 .irq_disable = disable_dynirq,
1849 .irq_mask = disable_dynirq,
1850 .irq_unmask = enable_dynirq,
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001851
Thomas Gleixnerc9e265e2011-02-05 20:08:54 +00001852 .irq_ack = ack_dynirq,
Jeremy Fitzhardingeaaca4962010-08-20 18:57:53 -07001853};
1854
Sheng Yang38e20b02010-05-14 12:40:51 +01001855int xen_set_callback_via(uint64_t via)
1856{
1857 struct xen_hvm_param a;
1858 a.domid = DOMID_SELF;
1859 a.index = HVM_PARAM_CALLBACK_IRQ;
1860 a.value = via;
1861 return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
1862}
1863EXPORT_SYMBOL_GPL(xen_set_callback_via);
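/*
 * Rough shape of the "via" encoding (see the Xen public headers for
 * the authoritative layout): the top byte selects the delivery type
 * (0 = GSI, 1 = PCI INTx, 2 = vector) and the low bits carry the
 * GSI/vector number.  HVM_CALLBACK_VECTOR() below builds the vector
 * form.
 */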
1864
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01001865#ifdef CONFIG_XEN_PVHVM
Sheng Yang38e20b02010-05-14 12:40:51 +01001866/* Vector callbacks are better than PCI interrupts for receiving event
 1867 * channel notifications because they can be delivered on any vcpu and
 1868 * require neither PCI support nor APIC interactions. */
1869void xen_callback_vector(void)
1870{
1871 int rc;
1872 uint64_t callback_via;
1873 if (xen_have_vector_callback) {
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001874 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
Sheng Yang38e20b02010-05-14 12:40:51 +01001875 rc = xen_set_callback_via(callback_via);
1876 if (rc) {
Joe Perches283c0972013-06-28 03:21:41 -07001877 pr_err("Request for Xen HVM callback vector failed\n");
Sheng Yang38e20b02010-05-14 12:40:51 +01001878 xen_have_vector_callback = 0;
1879 return;
1880 }
Joe Perches283c0972013-06-28 03:21:41 -07001881 pr_info("Xen HVM callback vector for event delivery is enabled\n");
Sheng Yang38e20b02010-05-14 12:40:51 +01001882 /* in the restore case the vector has already been allocated */
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001883 if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
1884 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1885 xen_hvm_callback_vector);
Sheng Yang38e20b02010-05-14 12:40:51 +01001886 }
1887}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01001888#else
1889void xen_callback_vector(void) {}
1890#endif
Sheng Yang38e20b02010-05-14 12:40:51 +01001891
Stefano Stabellini2e3d8862012-10-02 15:57:57 +01001892void __init xen_init_IRQ(void)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001893{
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001894 int i;
Mike Travisc7a35892009-01-10 21:58:11 -08001895
Jeremy Fitzhardingeb21ddbf2010-06-07 16:28:49 -04001896 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
1897 GFP_KERNEL);
Konrad Rzeszutek Wilk9d093e22011-09-29 13:31:21 -04001898 BUG_ON(!evtchn_to_irq);
Jeremy Fitzhardingeb21ddbf2010-06-07 16:28:49 -04001899 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1900 evtchn_to_irq[i] = -1;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001901
1902 init_evtchn_cpu_bindings();
1903
1904 /* No event channels are 'live' right now. */
1905 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1906 mask_evtchn(i);
1907
Stefano Stabellini9846ff12012-01-30 16:21:48 +00001908 pirq_needs_eoi = pirq_needs_eoi_flag;
1909
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001910#ifdef CONFIG_X86
Sheng Yang38e20b02010-05-14 12:40:51 +01001911 if (xen_hvm_domain()) {
1912 xen_callback_vector();
1913 native_init_IRQ();
Stefano Stabellini3942b742010-06-24 17:50:18 +01001914 /* pci_xen_hvm_init must be called after native_init_IRQ so that
1915 * __acpi_register_gsi can point at the right function */
1916 pci_xen_hvm_init();
Sheng Yang38e20b02010-05-14 12:40:51 +01001917 } else {
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001918 int rc;
Stefano Stabellini9846ff12012-01-30 16:21:48 +00001919 struct physdev_pirq_eoi_gmfn eoi_gmfn;
1920
Sheng Yang38e20b02010-05-14 12:40:51 +01001921 irq_ctx_init(smp_processor_id());
Jeremy Fitzhardinge38aa66f2010-09-02 14:51:39 +01001922 if (xen_initial_domain())
Konrad Rzeszutek Wilka0ee0562011-06-09 09:49:13 -04001923 pci_xen_initial_domain();
Stefano Stabellini9846ff12012-01-30 16:21:48 +00001924
1925 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
1926 eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
1927 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
1928 if (rc != 0) {
1929 free_page((unsigned long) pirq_eoi_map);
1930 pirq_eoi_map = NULL;
1931 } else
1932 pirq_needs_eoi = pirq_check_eoi_map;
Sheng Yang38e20b02010-05-14 12:40:51 +01001933 }
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001934#endif
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001935}