/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};
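
/*
 * Tree of irq -> (MMR offset, pnode) mappings, keyed by irq number;
 * uv_irq_lock protects all insertions, lookups, and removals.
 */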
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}
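
/*
 * Mask/unmask are no-ops here: a UV irq is enabled and disabled by
 * programming the sourcing hub MMR in uv_setup_irq()/uv_teardown_irq(),
 * not per-vector.  EOI is a plain local APIC ack.
 */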

static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;
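
	/*
	 * Allocate the node up front: uv_irq_lock is taken with interrupts
	 * disabled below, and a GFP_KERNEL allocation may sleep.
	 */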
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/*
 * Retrieve offset and pnode information from the rb tree for a specific irq.
 * Returns 0 on success, -1 if no mapping is recorded for the irq.
 */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);
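
	/*
	 * The sourcing MMR shares the layout of an I/O APIC routing entry
	 * (the BUILD_BUG_ON above guarantees it fits in one 64-bit word):
	 * build the value locally, then program it with a single MMR write.
	 */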
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));
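
	/* Writing the entry with only the mask bit set shuts off delivery. */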
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
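
/*
 * irq_set_affinity() callback: let the generic I/O APIC code pick the new
 * vector and destination, then reprogram the hub MMR sourcing this irq.
 */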
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;
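
	/* Rebuild the routing entry around the newly chosen destination. */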
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int irq, ret;
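
	/* Allocate a non-legacy irq, with its descriptor on the blade's node. */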
	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
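
/*
 * Typical use (illustrative sketch only; the device name, handler, and MMR
 * parameters below are hypothetical -- see in-tree callers such as the SGI
 * GRU driver for real usage):
 *
 *	int irq = uv_setup_irq("my-uv-dev", cpu, mmr_blade, mmr_offset,
 *			       UV_AFFINITY_CPU);
 *	if (irq <= 0)
 *		return irq ? irq : -EBUSY;
 *	if (request_irq(irq, my_uv_intr, 0, "my-uv-dev", my_dev))
 *		uv_teardown_irq(irq);
 */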

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode recorded at uv_setup_irq() time are looked up in
 * the rb tree and used to disable the interrupt before the mapping is freed.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);