/*
 * Dynamic IRQ management
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * Modelled after arch/x86/kernel/apic/io_apic.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include "internals.h" /* only for activate_irq() damage.. */

/*
 * The IRQ bitmap provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as to tie in to otherwise
 * unused irq_desc positions in the sparse array.
 */

/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	int irq = irq_alloc_desc_at(irq_want, node);
	if (irq < 0)
		return 0;

	activate_irq(irq);
	return irq;
}

int create_irq(void)
{
	int irq = irq_alloc_desc(numa_node_id());
	if (irq >= 0)
		activate_irq(irq);

	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_free_desc(irq);
}
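
/*
 * A minimal usage sketch (hypothetical board code, not part of this
 * driver): a consumer that needs a software-only vector can pair
 * create_irq() with destroy_irq(), registering its handler in between.
 * The handler name, device cookie and IRQ name below are illustrative
 * only.
 *
 *	int irq, ret;
 *
 *	irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *
 *	ret = request_irq(irq, board_demux_handler, 0, "board-demux", dev);
 *	if (ret) {
 *		destroy_irq(irq);
 *		return ret;
 *	}
 *
 *	...
 *
 *	free_irq(irq, dev);
 *	destroy_irq(irq);
 */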

void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
{
	int i;

	for (i = 0; i < nr_vecs; i++)
		irq_reserve_irq(evt2irq(vectors[i].vect));
}
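
/*
 * A minimal sketch of how a platform might hand its vector table over
 * (illustrative only -- the enum IDs, event codes and table name are
 * made up):
 *
 *	static struct intc_vect board_vectors[] = {
 *		INTC_VECT(IRQ0, 0x600),
 *		INTC_VECT(IRQ1, 0x620),
 *	};
 *
 *	reserve_intc_vectors(board_vectors, ARRAY_SIZE(board_vectors));
 *
 * Each entry's event code is translated with evt2irq() as above, and the
 * resulting IRQ number is marked as reserved so that dynamic allocation
 * will not hand it out.
 */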