/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};

static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}

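/*
 * The routing entry lives in a hub MMR that is programmed at setup time and
 * on affinity changes, so irq_mask/irq_unmask are deliberate no-ops; only
 * the EOI and set_affinity callbacks do real work.
 */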
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

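	/* Allocate the node on the blade's local memory node (NUMA locality). */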
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;
	unsigned int dest;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
	if (err != 0)
		return err;

	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

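	/*
	 * The hub MMR holds a 64-bit copy of an IO-APIC style routing
	 * entry (the BUILD_BUG_ON above guards the size), so build the
	 * entry in mmr_value and write it out in one shot.
	 */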
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}

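/*
 * Chip callback: move an already-enabled UV irq to a new CPU by picking a
 * fresh vector/destination and rewriting the sourcing hub's routing MMR.
 */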
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

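	/*
	 * NOCOPY: __ioapic_set_affinity() above is expected to have updated
	 * the irq's affinity mask already, so the generic code need not
	 * copy it again.
	 */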
	return IRQ_SET_MASK_OK_NOCOPY;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
		limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

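/*
 * Illustrative usage only (GRU_MMR_OFFSET and gru_intr() are hypothetical
 * placeholders, not defined in this file); on success uv_setup_irq()
 * returns the new irq number, which the caller then requests as usual:
 *
 *	irq = uv_setup_irq("GRU", cpu, blade, GRU_MMR_OFFSET, UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, gru_intr, 0, "GRU", NULL);
 *	...
 *	free_irq(irq, NULL);
 *	uv_teardown_irq(irq);
 */
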
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);