/*
 * Dynamic IRQ management
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * Modelled after arch/x86/kernel/apic/io_apic.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include "internals.h" /* only for activate_irq() damage.. */

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_RAW_SPINLOCK(vector_lock);

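/*
 * Each set bit corresponds to a bound IRQ number; for hardware sources
 * this is the evt2irq() translation of the INTEVT vector code. The
 * helpers below populate the map for statically bound vectors and hand
 * out (and release) the remaining positions for dynamic users.
 */
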
/*
 * Dynamic IRQ allocation and deallocation
 */
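/*
 * create_irq_nr() tries to grab the wanted vector first and otherwise
 * falls back to the first free position in the map. It returns the
 * allocated IRQ number on success, or 0 if no free vector or irq_desc
 * could be obtained.
 */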
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	raw_spin_unlock_irqrestore(&vector_lock, flags);

	irq = irq_alloc_desc_at(new, node);
	if (unlikely(irq != new)) {
		pr_err("can't get irq_desc for %d\n", new);
		return 0;
	}

	activate_irq(irq);
	return irq;

out_unlock:
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 0;
}

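/*
 * create_irq() allocates a dynamic IRQ on the node of the calling CPU,
 * preferring the first slot past the legacy range, and returns -1 on
 * failure.
 */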
int create_irq(void)
{
	int nid = cpu_to_node(smp_processor_id());
	int irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
	if (irq == 0)
		irq = -1;

	return irq;
}

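/*
 * A minimal usage sketch (illustrative only; "board_mux_chip" and the
 * choice of flow handler are assumptions, not something this file
 * provides):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	set_irq_chip_and_handler(irq, &board_mux_chip, handle_level_irq);
 *
 *	...
 *
 *	destroy_irq(irq);
 */

/*
 * destroy_irq() releases the irq_desc and returns the vector to the
 * map so that it can be handed out again.
 */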
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	irq_free_desc(irq);

	raw_spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

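/*
 * reserve_irq_vector() marks a single vector as in use, returning
 * -EBUSY if it has already been bound.
 */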
int reserve_irq_vector(unsigned int irq)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (test_and_set_bit(irq, intc_irq_map))
		ret = -EBUSY;
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return ret;
}

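/*
 * reserve_intc_vectors() marks each vector in the given table as bound,
 * using evt2irq() to translate the INTEVT vector codes into IRQ
 * numbers. A rough sketch (the enum IDs and vector numbers below are
 * hypothetical):
 *
 *	static struct intc_vect vectors[] = {
 *		INTC_VECT(IRQ0, 0x600),
 *		INTC_VECT(IRQ1, 0x620),
 *	};
 *
 *	reserve_intc_vectors(vectors, ARRAY_SIZE(vectors));
 */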
void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&vector_lock, flags);
	for (i = 0; i < nr_vecs; i++)
		__set_bit(evt2irq(vectors[i].vect), intc_irq_map);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

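/*
 * reserve_irq_legacy() marks everything below the first bound vector as
 * in use, keeping the dynamic allocator away from the low, legacy IRQ
 * numbers.
 */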
void reserve_irq_legacy(void)
{
	unsigned long flags;
	int i, j;

	raw_spin_lock_irqsave(&vector_lock, flags);
	j = find_first_bit(intc_irq_map, nr_irqs);
	for (i = 0; i < j; i++)
		__set_bit(i, intc_irq_map);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}