#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;

	return 0;
}
early_param("intremap", setup_intremap);

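/*
 * Per-IRQ interrupt-remapping state: the IOMMU that remaps this IRQ,
 * the index of its IRTE(s) in that IOMMU's remapping table, the
 * sub-handle used within a shared allocation, and the mask covering
 * the allocated block of entries.
 */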
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return get_irq_iommu(irq);
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	if (WARN_ONCE(data->irq_2_iommu,
		      KERN_DEBUG "irq_2_iommu!=NULL irq %u\n", irq))
		return data->irq_2_iommu;

	data->irq_2_iommu = kzalloc_node(sizeof(*data->irq_2_iommu),
					 GFP_ATOMIC, data->node);
	return data->irq_2_iommu;
}

static void irq_2_iommu_free(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_2_iommu *p = d->irq_2_iommu;

	d->irq_2_iommu = NULL;
	kfree(p);
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq) { }

#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

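/*
 * Return the irq_2_iommu mapping for @irq, or NULL if the IRQ either
 * has no mapping or has not been bound to an IOMMU yet.
 */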
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

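/*
 * Reserve @count consecutive IRTEs for @irq in @iommu's remapping
 * table. @count is rounded up to a power of two, and the resulting
 * block must fit the hardware's maximum invalidation handle mask.
 * Returns the index of the first entry, or -1 on failure.
 */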
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

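/*
 * Queue a selective Interrupt Entry Cache invalidation for the IRTE
 * block starting at @index and wait for the hardware to complete it.
 */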
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

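/*
 * Associate @irq with an IRTE block allocated elsewhere: record the
 * IOMMU, the base @index and the @subhandle used within that block.
 */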
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);

	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

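/*
 * Rewrite the IRTE backing @irq with @irte_modified, flush the cached
 * copy and invalidate the interrupt entry cache for that index.
 */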
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

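/*
 * Zero every IRTE in the block owned by @irq_iommu and invalidate the
 * interrupt entry cache. Sub-handle users share a block they do not
 * own, so for them this is a no-op.
 */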
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
	int rc = 0;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	irq_2_iommu_free(irq);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

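/*
 * Derive the source-id for an MSI from the PCI topology: PCIe and Root
 * Complex integrated devices are verified by their own requester-id;
 * devices behind a PCIe-to-PCI/PCI-X bridge can only be verified down
 * to their bus number; devices behind a legacy PCI bridge take the
 * bridge's requester-id.
 */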
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

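/*
 * Program the IOMMU with the physical address and size of the
 * remapping table (via SIRTP), invalidate the interrupt entry cache
 * globally, then turn on interrupt-remapping (IRE).
 */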
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

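/*
 * Allocate the interrupt-remapping table for @iommu and enable
 * remapping on it; @mode is passed through to IR_X2APIC_MODE() to
 * select x2apic (EIM) or xapic operation.
 */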
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

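/*
 * Enable interrupt-remapping across all DRHD units: disable any
 * remapping and queued invalidation left enabled prior to OS handover,
 * verify EIM support if x2apic mode was requested, then enable queued
 * invalidation and set up a remapping table on every unit.
 */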
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by "
			       "DRHD, ecap %Lx\n",
			       drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

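/*
 * Walk the ACPI device-scope path of a scoped device, following
 * secondary bus numbers through any intermediate bridges, to record
 * the bus/devfn its interrupt requests will carry as source-id; used
 * for both HPET and IOAPIC scopes below.
 */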
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

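/*
 * Turn interrupt-remapping off on every IOMMU that supports it.
 */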
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

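/*
 * Restore queued invalidation and re-enable interrupt-remapping on
 * every IOMMU that supports it.
 */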
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}