/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
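
/*
 * In outline: a driver binds an event channel, VIRQ or IPI to an irq
 * (see the bind_*_to_irq functions below); the binding is recorded in
 * irq_info[] and evtchn_to_irq[], and the irq is wired to
 * xen_dynamic_chip.  The upcall handler xen_evtchn_do_upcall() then
 * only has to map a pending port back to its irq and hand the event
 * to the normal IRQ core.
 */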

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
	unsigned short evtchn;
	unsigned char index;
	unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL_GPL(force_evtchn_callback);

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
	return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq].type;
}
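
/*
 * For instance (illustrative values only), an irq bound to VIRQ_TIMER
 * on event channel 3 is stored as
 *
 *	irq_info[irq] = mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 3);
 *
 * after which evtchn_from_irq(irq) == 3, index_from_irq(irq) ==
 * VIRQ_TIMER and type_from_irq(irq) == IRQT_VIRQ.
 */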

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}
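
/*
 * Note that a channel is reported by active_evtchns() only if all
 * three conditions hold: it is pending in the shared info page, it is
 * bound to this cpu in cpu_evtchn_mask, and it is not masked.
 */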

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif

	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
	__set_bit(chn, cpu_evtchn_mask[cpu]);

	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	int i;
	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		irq_desc[i].affinity = cpumask_of_cpu(0);
#endif

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];
	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
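
/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * file; the names frontend_interrupt, evtchn and dev are made up for
 * illustration).  A frontend that has learned its backend's event
 * channel might bind a handler like so:
 *
 *	static irqreturn_t frontend_interrupt(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, frontend_interrupt,
 *					0, "frontend", dev);
 *	if (irq < 0)
 *		return irq;
 *
 * and undo the binding later with unbind_from_irqhandler(irq, dev).
 */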

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_evtchn[i], i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}


/*
 * Search the CPU's pending-event bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
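/*
 * Worked example (illustrative): if the xchg() below returns a
 * selector with bit 2 set, then word 2 of evtchn_pending[] holds at
 * least one active channel; if __ffs() of that word's active bits is
 * 5, the pending port is 2 * BITS_PER_LONG + 5, and
 * evtchn_to_irq[port] gives the irq to dispatch.
 */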
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				if (irq != -1)
					xen_do_IRQ(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so the bindcount should be non-0 */
	BUG_ON(irq_bindcount[irq] == 0);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of_cpu(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
		BUG_ON(irq_info[irq].index != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_IPI);
		BUG_ON(irq_info[irq].index != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		poll.ports = &evtchn;

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
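
/*
 * The expected calling pattern is a spinlock-style wait (sketch only;
 * 'condition' stands for whatever the caller is waiting on):
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition)
 *		xen_poll_irq(irq);
 *
 * SCHEDOP_poll returns when the port becomes pending, so the vcpu
 * blocks in Xen instead of spinning, even though the event channel
 * stays masked and delivers no interrupt.
 */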

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_IRQS; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}