/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
        unsigned short evtchn;
        unsigned char index;
        unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
        return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq].type;
}

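/*
 * Return the events in word 'idx' of the pending bitmap that are both
 * unmasked and bound to 'cpu', i.e. the ones this vcpu should service.
 */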
static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

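/*
 * Record that event channel 'chn' is delivered to 'cpu': update the
 * per-cpu channel masks and, on SMP, the irq descriptor's affinity.
 */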
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
#endif

        __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
        __set_bit(chn, cpu_evtchn_mask[cpu]);

        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
        int i;
        /* By default all event channels notify CPU#0. */
        for (i = 0; i < nr_irqs; i++) {
                struct irq_desc *desc = irq_to_desc(i);
                if (!desc)
                        continue;
                desc->affinity = cpumask_of_cpu(0);
        }
#endif

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

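/*
 * Mask/unmask an event channel directly in the shared_info page.
 * Masking only suppresses delivery; the pending bit stays set, so
 * unmask_evtchn() must re-raise the upcall for any event that arrived
 * while the channel was masked.
 */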
static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

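/*
 * Find the first IRQ in the dynamic range with no bindings; panics if
 * the whole range is already in use.
 */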
static int find_unbound_irq(void)
{
        int irq;

        /* Only allocate from dynirq range */
        for (irq = 0; irq < nr_irqs; irq++)
                if (irq_bindcount[irq] == 0)
                        break;

        if (irq == nr_irqs)
                panic("No available IRQ to bind to: increase nr_irqs!\n");

        return irq;
}

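/*
 * Map an event channel onto an IRQ, allocating a new dynamic IRQ and
 * installing the Xen irq_chip the first time the channel is bound.
 * Each successful call takes a reference in irq_bindcount[].
 */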
int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

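/* Allocate an event channel for IPI 'ipi' on 'cpu' and map it to an IRQ. */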
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];
        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}


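/*
 * Bind the per-cpu VIRQ 'virq' on 'cpu' to a fresh event channel and
 * map it to an IRQ.
 */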
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

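/*
 * Drop one reference on an IRQ binding; on the last reference (with a
 * valid channel), close the event channel with Xen, clear the mapping
 * tables and release the dynamic IRQ.
 */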
static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                dynamic_irq_cleanup(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

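/*
 * Convenience wrappers: bind an event channel / VIRQ / IPI to an IRQ
 * and request_irq() it in one step, unwinding the binding if the
 * request fails. A hypothetical frontend would use them roughly like
 * this (names below are illustrative only, not from this file):
 *
 *	ret = bind_evtchn_to_irqhandler(evtchn, myfront_interrupt,
 *					0, "myfront", dev);
 *	if (ret < 0)
 *		return ret;
 *	dev->irq = ret;
 */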
int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

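/* Send IPI 'vector' to 'cpu' by notifying the event channel bound to it. */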
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

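/*
 * Debug handler: dump per-vcpu upcall state and the pending/mask bitmaps
 * from the shared info page.
 */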
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);

        spin_lock_irqsave(&debug_lock, flags);

        printk("vcpu %d\n ", cpu);

        for_each_online_cpu(i) {
                struct vcpu_info *v = per_cpu(xen_vcpu, i);
                printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
                       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
                       v->evtchn_upcall_pending,
                       v->evtchn_pending_sel);
        }
        printk("pending:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n " : " ");
        printk("\nmasks:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nunmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk(" %d: event %d -> irq %d\n",
                               cpu_evtchn[i], i,
                               evtchn_to_irq[i]);
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}


/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        static DEFINE_PER_CPU(unsigned, nesting_count);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(nesting_count)++)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];

                                if (irq != -1)
                                        xen_do_IRQ(irq, regs);
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(nesting_count);
                __get_cpu_var(nesting_count) = 0;
        } while (count != 1);

out:
        put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so the bindcount should be non-0 */
        BUG_ON(irq_bindcount[irq] == 0);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of_cpu(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
}


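/*
 * irq_chip .set_affinity callback: route the irq's event channel to the
 * first cpu in the destination mask.
 */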
static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}

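/*
 * Re-inject an event on its channel: mark it pending again and, unless
 * it was already masked, unmask it so the upcall sees it once more.
 */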
int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

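/*
 * irq_chip callbacks for dynamically bound event channels (wired into
 * xen_dynamic_chip below): enable/disable map to unmask/mask, ack
 * migrates the irq if needed and clears the pending bit, and retrigger
 * re-marks the channel pending.
 */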
static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

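/*
 * Post-resume helpers: rebind this cpu's VIRQs and IPIs to fresh event
 * channels and re-establish the evtchn <-> irq mappings.
 */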
static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq].type != IRQT_VIRQ);
                BUG_ON(irq_info[irq].index != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq].type != IRQT_IPI);
                BUG_ON(irq_info[irq].index != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);

        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

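/* Set or test an irq's pending state directly on its event channel. */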
void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending. In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = 0;
                poll.ports = &evtchn;

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}

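/*
 * Called on resume: event channel numbers are not preserved across
 * save/restore, so mask everything, forget the old mappings and rebind
 * each cpu's VIRQs and IPIs.
 */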
void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
        .ack            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
};

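/*
 * Early init: mask all event channels and clear the dynamic IRQ
 * refcounts before any bindings are made, then set up the irq context
 * for this cpu.
 */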
void __init xen_init_IRQ(void)
{
        int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < nr_irqs; i++)
                irq_bindcount[i] = 0;

        irq_ctx_init(smp_processor_id());
}