/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};

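/* rb tree of irq -> MMR offset/pnode mappings, protected by uv_irq_lock */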
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

static void uv_noop(struct irq_data *data) { }

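/* EOI handler: acknowledge the interrupt at the local APIC */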
static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}

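/*
 * Minimal irq_chip for UV-routed interrupts: mask/unmask are no-ops, EOI is
 * a local APIC ack, and affinity changes rewrite the routing MMR on the hub.
 */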
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
			 uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int limit)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= cfg->dest_apicid;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}

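/*
 * Affinity change: pick the new destination CPU, rebuild the routing entry
 * and rewrite the MMR previously recorded for this irq in the rb tree.
 */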
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	if (apic_set_affinity(data, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_NOCOPY;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int ret, irq;
	struct irq_alloc_info info;

	init_irq_alloc_info(&info, cpumask_of(cpu));
	irq = irq_domain_alloc_irqs(NULL, 1, uv_blade_to_memory_nid(mmr_blade),
				    &info);
	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		irq_domain_free_irqs(irq, 1);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
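
/*
 * Usage sketch (illustrative only, not part of this interface): a driver
 * would typically allocate the irq with uv_setup_irq(), attach its own
 * handler with request_irq(), and undo both in reverse order on teardown.
 * The handler, device name and cookie below are hypothetical.
 *
 *	int irq = uv_setup_irq("my_uv_dev", cpu, mmr_blade, mmr_offset,
 *			       UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_uv_intr, 0, "my_uv_dev", my_dev)) {
 *		uv_teardown_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	uv_teardown_irq(irq);
 */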

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode are looked up from the information recorded by
 * uv_setup_irq(), so only the irq itself needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);