#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

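/*
 * The IRTE destination field is laid out differently in the two APIC
 * modes: in x2apic mode it carries the full 32-bit destination APIC ID,
 * while in xapic mode the 8-bit APIC ID lives in bits 15:8.
 * IR_X2APIC_MODE() likewise supplies the extended-interrupt-mode bit
 * (bit 11) of the DMAR_IRTA_REG value when x2apic is in use.
 */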
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

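/*
 * Allocate 'count' consecutive IRTEs for the given irq.  Multi-vector
 * requests are rounded up to a power of two and recorded as a mask so
 * that a single queued-invalidation request can later flush the whole
 * block.  For example, a request for count = 3 is rounded up to 4
 * entries with irte_mask = 2 (i.e. 1 << 2 == 4 entries share one
 * invalidation handle).
 */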
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

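/*
 * Queue a selective interrupt-entry-cache invalidation covering the
 * 2^mask IRTEs starting at 'index' and wait for it to complete.
 */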
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

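/*
 * Clear the block of IRTEs anchored at this irq and flush them from
 * the interrupt entry cache.  Sub-handle users share the block owned
 * by the base handle (sub_handle == 0), so only the base handle
 * actually clears the entries.
 */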
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
				* the third least significant bit
				*/
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
				* the second and third least significant bits
				*/
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
				* the three least significant bits
				*/

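/*
 * A PCI source-id (requester-id) is the 16-bit value (bus << 8 | devfn).
 * For example, a device at 00:1f.2 has SID 0x00fa; with SQ_13_IGNORE_3
 * the hardware would accept requests from any function of device 1f on
 * bus 0, since the three function-number bits are ignored.
 */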
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

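/*
 * Pick the source-id for an MSI-capable device.  A native PCIe device
 * (or a Root Complex integrated device) sends requests with its own
 * requester-id.  A device behind a PCIe-to-PCI/PCI-X bridge may have
 * its requester-id rewritten by the bridge, so only the bus number can
 * be verified; behind a legacy PCI bridge, requests carry the bridge's
 * own requester-id.
 */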
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

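/*
 * Program an IOMMU with the remapping table and switch it on: write the
 * table address (plus the x2apic mode bit) to DMAR_IRTA_REG, latch it
 * with the SIRTP command, globally invalidate the interrupt entry
 * cache, and finally set IRE while clearing CFI so that
 * compatibility-format (non-remapped) interrupts are blocked.
 */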
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_irq_remap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	bool x2apic_present;
	int setup = 0;
	int eim = 0;

	x2apic_present = x2apic_supported();

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	if (x2apic_present) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			goto error;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	if (x2apic_present)
		WARN(1, KERN_WARNING
			"Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");

	return -1;
}

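/*
 * A DMAR device-scope entry names a device by its path from a start
 * bus: a sequence of (device, function) hops.  Because these scopes
 * are parsed before the PCI subsystem is up, each hop's secondary bus
 * number is fetched with the early direct config-space accessors.
 */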
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

int __init ir_dev_scope_init(void)
{
	if (!irq_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

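/*
 * With remapping enabled the IO-APIC RTE uses the remappable format:
 * format = 1, and the 16-bit IRTE index is split between the 'index'
 * (low 15 bits) and 'index2' (bit 15) fields.  The RTE's vector field
 * is repurposed to hold the IO-APIC pin number, which acts as the
 * "virtual vector" the irq handler uses for the explicit EOI.
 */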
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

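/*
 * Compose an MSI message in the remappable format: the IRTE handle is
 * scattered across the address word (with the SHV bit set), and the
 * data word carries only the sub-handle that selects an entry within
 * the device's IRTE block.
 */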
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;

	iommu = map_dev_to_ir(pdev);
	if (!iommu)
		return -ENOENT;
	/*
	 * setup the mapping between the irq and the IRTE
	 * base index, the sub_handle pointing to the
	 * appropriate interrupt remap table entry.
	 */
	set_irte_irq(irq, iommu, index, sub_handle);

	return 0;
}

static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct intel_iommu *iommu = map_hpet_to_ir(id);
	int index;

	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	return 0;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.setup_hpet_msi		= intel_setup_hpet_msi,
};