/*
 * Dynamic IRQ management
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * Modelled after arch/x86/kernel/apic/io_apic.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include "internals.h" /* only for activate_irq() damage.. */

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static, through the CPU
 * vector map, or dynamic, in the case of board mux vectors or MSI.
 *
 * As this is the central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as to tie in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_RAW_SPINLOCK(vector_lock);

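/*
 * All updates to intc_irq_map are serialized by vector_lock. The bitmap
 * itself is sized statically by NR_IRQS, while searches are bounded by
 * the runtime nr_irqs value.
 */
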
/*
 * Dynamic IRQ allocation and deallocation
 */
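/*
 * create_irq_nr() hands out a dynamic IRQ: the caller's preferred vector
 * (irq_want) is tried first, and if it is already bound the map is
 * scanned for the first free vector. The backing irq_desc is allocated
 * on the given node. Returns the allocated IRQ number, or 0 if the
 * vector space is exhausted or no irq_desc could be set up.
 */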
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
        unsigned int irq = 0, new;
        unsigned long flags;
        struct irq_desc *desc;

        raw_spin_lock_irqsave(&vector_lock, flags);

        /*
         * First try the wanted IRQ
         */
        if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
                new = irq_want;
        } else {
                /* .. then fall back to scanning. */
                new = find_first_zero_bit(intc_irq_map, nr_irqs);
                if (unlikely(new == nr_irqs))
                        goto out_unlock;

                __set_bit(new, intc_irq_map);
        }

        desc = irq_to_desc_alloc_node(new, node);
        if (unlikely(!desc)) {
                pr_err("can't get irq_desc for %u\n", new);
                /* don't leak the vector we just claimed */
                __clear_bit(new, intc_irq_map);
                goto out_unlock;
        }

        desc = move_irq_desc(desc, node);
        irq = new;

out_unlock:
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        if (irq > 0) {
                dynamic_irq_init(irq);
                activate_irq(irq);
        }

        return irq;
}

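/*
 * create_irq() is the simple "give me any free IRQ" interface: it asks
 * create_irq_nr() for a vector starting at NR_IRQS_LEGACY on the local
 * node, and returns -1 when nothing is available.
 *
 * Rough usage sketch (the handler and cookie names below are
 * placeholders, not part of this file):
 *
 *      irq = create_irq();
 *      if (irq < 0)
 *              return irq;
 *      ret = request_irq(irq, my_handler, 0, "my-device", my_dev);
 *      ...
 *      free_irq(irq, my_dev);
 *      destroy_irq(irq);
 */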
int create_irq(void)
{
        int nid = cpu_to_node(smp_processor_id());
        int irq;

        irq = create_irq_nr(NR_IRQS_LEGACY, nid);
        if (irq == 0)
                irq = -1;

        return irq;
}

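/*
 * destroy_irq() is the inverse of create_irq()/create_irq_nr(): it tears
 * down the dynamic state for the IRQ and releases its slot in the vector
 * map so it can be handed out again.
 */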
void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        dynamic_irq_cleanup(irq);

        raw_spin_lock_irqsave(&vector_lock, flags);
        __clear_bit(irq, intc_irq_map);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

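/*
 * Mark a single vector as in use so the dynamic allocator will not hand
 * it out. Returns -EBUSY if the vector was already claimed.
 */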
int reserve_irq_vector(unsigned int irq)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&vector_lock, flags);
        if (test_and_set_bit(irq, intc_irq_map))
                ret = -EBUSY;
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return ret;
}

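/*
 * Reserve the vectors backing a controller's static vector table. Each
 * hardware event code is translated to its Linux IRQ number via
 * evt2irq() before being marked in the map.
 */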
void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
{
        unsigned long flags;
        int i;

        raw_spin_lock_irqsave(&vector_lock, flags);
        for (i = 0; i < nr_vecs; i++)
                __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

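/*
 * Block out everything below the first bound vector, covering the
 * legacy IRQ range so dynamic allocations never land there.
 */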
void reserve_irq_legacy(void)
{
        unsigned long flags;
        int i, j;

        raw_spin_lock_irqsave(&vector_lock, flags);
        j = find_first_bit(intc_irq_map, nr_irqs);
        for (i = 0; i < j; i++)
                __set_bit(i, intc_irq_map);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}