/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
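/*
 * Note: evtchn_to_irq is a two-level table: an array of row pointers, each
 * row being a page of ints allocated on demand in set_evtchn_to_irq().
 * As a worked example (assuming a typical configuration with 4 KiB pages
 * and 4-byte ints, so EVTCHN_PER_ROW == 1024), event channel 2500 lands in
 * row 2, column 452.
 */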

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		evtchn_to_irq[row][col] = -1;
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

static void xen_evtchn_mask_all(void)
{
	unsigned int evtchn;

	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{

	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(info);
	if (rc)
		goto err;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	for (; i >= 0; i--)
		__unbind_from_irq(irq + i);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * If trying to remove a vector in a MSI group different
	 * than the first one skip the PIRQ unmap unless this vector
	 * is the first one in the group.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

 out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
				   unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
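
/*
 * Illustrative usage sketch (not taken from this file): a frontend driver
 * that has been handed an event channel, e.g. via xenbus, would typically
 * bind it with something like the following; "my_handler", "my-frontend"
 * and "my_dev" are hypothetical names.
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */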

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
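
/*
 * Illustrative usage sketch (hypothetical handler name): a per-cpu VIRQ
 * such as VIRQ_DEBUG could be bound on each CPU with
 *
 *	bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_virq_handler,
 *				IRQF_PERCPU, "debug", NULL);
 *
 * Passing IRQF_PERCPU selects the per-cpu irq chip in bind_virq_to_irq().
 */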

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq:irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
 done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
1196
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001197void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1198{
Konrad Rzeszutek Wilk6efa20e2013-07-19 11:51:31 -04001199 int irq;
1200
Stefano Stabellini072b2062013-08-13 16:57:06 +00001201#ifdef CONFIG_X86
Konrad Rzeszutek Wilk6efa20e2013-07-19 11:51:31 -04001202 if (unlikely(vector == XEN_NMI_VECTOR)) {
1203 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
1204 if (rc < 0)
1205 printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1206 return;
1207 }
Stefano Stabellini072b2062013-08-13 16:57:06 +00001208#endif
Konrad Rzeszutek Wilk6efa20e2013-07-19 11:51:31 -04001209 irq = per_cpu(ipi_to_irq, cpu)[vector];
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001210 BUG_ON(irq < 0);
1211 notify_remote_via_irq(irq);
1212}
1213
Tejun Heo245b2e72009-06-24 15:13:48 +09001214static DEFINE_PER_CPU(unsigned, xed_nesting_count);
1215
Sheng Yang38e20b02010-05-14 12:40:51 +01001216static void __xen_evtchn_do_upcall(void)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001217{
Christoph Lameter780f36d2010-12-06 11:16:29 -06001218 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
David Vrabel9a489f42013-03-13 15:29:25 +00001219 int cpu = get_cpu();
Ruslan Pisarev088c05a2011-07-26 14:16:13 +03001220 unsigned count;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001221
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001222 do {
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001223 vcpu_info->evtchn_upcall_pending = 0;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001224
Christoph Lameterb2e4ae62010-12-06 11:40:07 -06001225 if (__this_cpu_inc_return(xed_nesting_count) - 1)
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001226 goto out;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001227
David Vrabel9a489f42013-03-13 15:29:25 +00001228 xen_evtchn_handle_events(cpu);
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001229
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001230 BUG_ON(!irqs_disabled());
1231
Christoph Lameter780f36d2010-12-06 11:16:29 -06001232 count = __this_cpu_read(xed_nesting_count);
1233 __this_cpu_write(xed_nesting_count, 0);
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001234 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001235
1236out:
Jeremy Fitzhardinge3445a8f2009-02-06 14:09:46 -08001237
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001238 put_cpu();
1239}
1240
Sheng Yang38e20b02010-05-14 12:40:51 +01001241void xen_evtchn_do_upcall(struct pt_regs *regs)
1242{
1243 struct pt_regs *old_regs = set_irq_regs(regs);
1244
Mojiong Qiu772aebc2012-11-06 16:08:15 +08001245 irq_enter();
Stefano Stabellini0ec53ec2012-09-14 13:37:32 +00001246#ifdef CONFIG_X86
Sheng Yang38e20b02010-05-14 12:40:51 +01001247 exit_idle();
Thomas Gleixner99c8b792014-02-23 21:40:21 +00001248 inc_irq_stat(irq_hv_callback_count);
Arnd Bergmannd06eb3e2014-03-25 16:52:25 +01001249#endif
Sheng Yang38e20b02010-05-14 12:40:51 +01001250
1251 __xen_evtchn_do_upcall();
1252
1253 irq_exit();
1254 set_irq_regs(old_regs);
1255}
1256
1257void xen_hvm_evtchn_do_upcall(void)
1258{
1259 __xen_evtchn_do_upcall();
1260}
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001261EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
Sheng Yang38e20b02010-05-14 12:40:51 +01001262
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/*
	 * Make sure the irq is masked, since the new event channel
	 * will also be masked.
	 */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/*
	 * Expect irq to have been bound before, so there should be
	 * a proper type.
	 */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu);
	/* This will be deferred until interrupt is processed */
	irq_set_affinity(irq, cpumask_of(info->cpu));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to the target vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);

	return rebind_irq_to_cpu(data->irq, tcpu);
}
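
/*
 * Note (illustrative, hypothetical irq number): set_affinity_irq() is what
 * the generic irq layer ends up calling when a caller steers a Xen irq,
 * e.g.
 *
 *	irq_set_affinity(irq, cpumask_of(2));
 *
 * which rebinds the underlying event channel to vcpu 2 via
 * rebind_irq_to_cpu() above.
 */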
1339
static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq(data->irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	/* Mask, make the channel pending, then unmask so it fires again. */
	masked = test_and_set_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/*
		 * Save/restore of PT devices doesn't work, so at this point
		 * the only devices present are GSI-based emulated devices.
		 */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/*
 * Poll waiting for an irq to become pending with timeout.  In the usual
 * case, the irq will be disabled so it won't deliver an interrupt.
 */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/*
 * Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt.
 */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
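
/*
 * Example (illustrative sketch): the intended way to combine the
 * pending/poll helpers above, modelled on the pattern the Xen spinlock
 * code uses.  'example_wait_for_event' and the condition callback are
 * hypothetical.
 */
static void __maybe_unused example_wait_for_event(int irq,
						  bool (*cond)(void *),
						  void *arg)
{
	/* Clear any stale pending state before testing the condition. */
	xen_clear_irq_pending(irq);

	/* Re-check after clearing, to avoid a lost-wakeup race. */
	if (cond(arg))
		return;

	/* Block in the hypervisor until the event becomes pending. */
	xen_poll_irq(irq);
}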
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001527
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_mask_all();
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}
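
/*
 * The ordering above matters: all channels are masked and the ABI state
 * reset before the stale irq<->evtchn mappings are cleared, and only then
 * are the per-cpu VIRQs/IPIs and the PIRQs rebound against the new
 * event-channel space.
 */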

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};
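
/*
 * Example (illustrative sketch): earlier in this file, each irq is
 * attached to one of the chips above roughly like so (event channels
 * look edge-triggered to the kernel).  'example_wire_chip' is
 * hypothetical.
 */
static void __maybe_unused example_wire_chip(int irq)
{
	irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
				      handle_edge_irq, "event");
}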
1611
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/*
 * Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions.
 */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);
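/*
 * Being built in, the parameter can also be set on the kernel command
 * line: booting with "xen.fifo_events=0" forces the 2-level ABI.
 */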
1654
void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	xen_evtchn_mask_all();

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/*
		 * pci_xen_hvm_init must be called after native_init_IRQ so
		 * that __acpi_register_gsi can point at the right function.
		 */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		/* TODO: No PVH support for PIRQ EOI */
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}