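/*
 * Intel VT-d interrupt remapping support: allocation and maintenance of
 * interrupt remapping table entries (IRTEs), and hardware enablement of
 * interrupt remapping on each DRHD unit.
 */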
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

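/*
 * Per-irq bookkeeping: which IOMMU's table holds this irq's IRTE(s), at
 * what base index, plus the sub-handle and mask used for shared blocks.
 */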
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
};

#ifdef CONFIG_HAVE_SPARSE_IRQ
static struct irq_2_iommu *irq_2_iommuX;
/* start with enough entries to fill about one page */
static int nr_irq_2_iommu = 0x100;
static int irq_2_iommu_index;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);

extern void *__alloc_bootmem_nopanic(unsigned long size,
				     unsigned long align,
				     unsigned long goal);

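/*
 * Hand out the next free irq_2_iommu slot, growing the backing store
 * (bootmem early in boot, kzalloc afterwards) when the pre-allocated
 * batch runs out.  Slots are handed out by a simple bump index and are
 * never returned.
 */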
static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
{
	struct irq_2_iommu *iommu;
	unsigned long total_bytes;

	if (irq_2_iommu_index >= nr_irq_2_iommu) {
		/*
		 * We have run out of the pre-allocated entries; allocate
		 * another batch.
		 */
		printk(KERN_DEBUG "trying to get %d more irq_2_iommu entries\n",
		       nr_irq_2_iommu);

		total_bytes = sizeof(struct irq_2_iommu) * nr_irq_2_iommu;

		if (after_bootmem)
			iommu = kzalloc(total_bytes, GFP_ATOMIC);
		else
			iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);

		if (!iommu)
			panic("cannot get more irq_2_iommu entries\n");

		irq_2_iommuX = iommu;
		irq_2_iommu_index = 0;
	}

	iommu = &irq_2_iommuX[irq_2_iommu_index];
	irq_2_iommu_index++;
	return iommu;
}

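/* Look up the irq_2_iommu entry hanging off @irq's descriptor, if any. */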
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	BUG_ON(!desc);

	return desc->irq_2_iommu;
}

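/*
 * Like irq_2_iommu(), but allocates the irq descriptor and the
 * irq_2_iommu entry on first use.
 */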
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * Allocate the irq descriptor if it is not allocated already.
	 */
	desc = irq_to_desc_alloc(irq);

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);

	return desc->irq_2_iommu;
}

#else /* !CONFIG_HAVE_SPARSE_IRQ */

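/* Without sparse irqs the mapping is a flat table indexed by irq number. */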
#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
#endif

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

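/*
 * Like irq_2_iommu(), but only returns the entry if it is actually bound
 * to an IOMMU, i.e. if an IRTE has been allocated for the irq.
 */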
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

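/* Non-zero if @irq is currently backed by an interrupt remapping entry. */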
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

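/* Copy the live IRTE for @irq into @entry.  Returns 0 on success, -1 otherwise. */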
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	if (!entry)
		return -1;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

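/*
 * Allocate @count consecutive IRTEs for @irq in @iommu's remapping table
 * and bind @irq to the base entry.  @count is rounded up to a power of
 * two so that the whole block can later be invalidated with one
 * mask-based flush.  Returns the base index, or -1 on failure.
 *
 * A multi-vector user (the MSI path, for instance) would allocate the
 * block once for the first irq and attach the remaining irqs to it via
 * set_irte_irq() with sub-handles 1..count-1.
 */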
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_HAVE_SPARSE_IRQ
	/* protect the later irq_2_iommu_alloc() from out-of-range irqs */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * Start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* found a run of count free entries */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}

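/*
 * Queue a selective interrupt-entry-cache invalidation covering the
 * 2^mask entries at @index and wait for the hardware to complete it.
 */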
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

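/*
 * Return the base IRTE index of the block backing @irq and store the
 * irq's sub-handle in @sub_handle.
 */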
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}

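/*
 * Attach @irq to an IRTE block that was allocated elsewhere: @index is
 * the block's base and @subhandle the irq's slot within it.
 */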
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);

	irq_iommu = irq_2_iommu_alloc(irq);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

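/* Detach @irq from its IRTE; the table entries themselves are untouched. */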
272int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
273{
Yinghai Lue420dfb2008-08-19 20:50:21 -0700274 struct irq_2_iommu *irq_iommu;
275
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700276 spin_lock(&irq_2_ir_lock);
Yinghai Lue420dfb2008-08-19 20:50:21 -0700277 irq_iommu = valid_irq_2_iommu(irq);
278 if (!irq_iommu) {
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700279 spin_unlock(&irq_2_ir_lock);
280 return -1;
281 }
282
Yinghai Lue420dfb2008-08-19 20:50:21 -0700283 irq_iommu->iommu = NULL;
284 irq_iommu->irte_index = 0;
285 irq_iommu->sub_handle = 0;
286 irq_2_iommu(irq)->irte_mask = 0;
Suresh Siddhab6fcb332008-07-10 11:16:44 -0700287
288 spin_unlock(&irq_2_ir_lock);
289
290 return 0;
291}
292
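/*
 * Rewrite @irq's IRTE with @irte_modified and flush the stale entry from
 * the interrupt entry cache.
 */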
int modify_irte(int irq, struct irte *irte_modified)
{
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	qi_flush_iec(iommu, index, 0);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

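/* Flush the interrupt-entry-cache entries backing @irq. */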
int flush_irte(int irq)
{
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return 0;
}

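/* Find the interrupt-remapping capable IOMMU that serves IO-APIC @apic. */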
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

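/* Find the IOMMU (DRHD unit) whose scope covers PCI device @dev. */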
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

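/*
 * Release @irq's IRTEs.  Only the sub-handle 0 owner clears the table
 * entries and flushes the cache; other sharers merely drop their binding.
 */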
int free_irte(int irq)
{
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

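/*
 * Point the hardware at the remapping table (DMAR_IRTA_REG + SIRTP),
 * globally invalidate the interrupt entry cache, and then turn on
 * interrupt remapping (IRE) on @iommu.
 */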
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

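/*
 * Allocate the interrupt remapping table for @iommu and hand it to the
 * hardware via iommu_set_intr_remapping().
 */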
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_KERNEL);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

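/*
 * Enable interrupt remapping across all DRHD units: verify EIM support
 * when extended (x2apic) mode is requested, enable queued invalidation
 * everywhere (required for IEC flushes), then install and switch on a
 * remapping table per unit.
 */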
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * Check for interrupt-remapping support.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD units.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Set up interrupt remapping for all the DRHD units now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * TODO: handle the error condition gracefully here.
	 */
	return -1;
}

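/*
 * Walk this DRHD's ACPI device scope and record which IOMMU serves each
 * IO-APIC found there.
 */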
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded max IO APICs\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}