/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include <asm/e820.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return root_present(root) ?
		(struct context_entry *)phys_to_virt(root->val & VTD_PAGE_MASK) :
		NULL;
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device across
 * iommus may be owned in one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

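/*
 * Deferred IOTLB flushing: rather than flushing on every unmap, freed
 * IOVAs are queued per IOMMU and released in batches, when a table
 * fills up (HIGH_WATER_MARK entries) or when unmap_timer fires; the
 * intel_iommu=strict option below disables this batching.
 */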
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered intel_iommus; bounds the per-domain iommu bitmaps */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

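/*
 * Find the widest AGAW supported by this iommu that does not exceed
 * the AGAW needed for max_gaw, scanning the SAGAW capability bits
 * from largest to smallest; returns -1 if none is supported.
 */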
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may differ across iommus: start from a default agaw, and
 * fall back to a smaller supported agaw on iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

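/*
 * Find the DRHD unit covering segment:bus.devfn and return its iommu:
 * either an exact device match, a match via a listed bridge whose
 * secondary bus range contains 'bus', or the segment's catch-all
 * (include_all) unit.  Returns NULL if no unit claims the device.
 */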
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

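/*
 * Each page-table level decodes LEVEL_STRIDE (9) bits of the address,
 * so level = agaw + 2 and width = 30 + agaw * 9.  For example, agaw 2
 * means a 4-level table covering a 48-bit guest address width.
 */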
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}

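/*
 * Walk the page table down to the level-1 PTE for addr, allocating
 * (and flushing) any missing intermediate page-table pages on the way.
 * Returns NULL if an allocation fails.
 */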
static struct dma_pte *addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * higher-level tables always set r/w; the last
			 * level page table controls read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
					  int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's a partial page */
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* invalidate context-cache entries: global, domain- or device-selective */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* invalidate IOTLB entries: global, domain-selective or page-selective */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably only needed to be extra safe; it looks like
	 * we can ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

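/*
 * Look up the device_domain_info for segment:bus.devfn if both the
 * device and its iommu can do ATS-based device-IOTLB invalidation
 * (ecap support, queued invalidation, an ATS capability and a matching
 * ATSR unit); otherwise return NULL.
 */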
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	/* device_to_iommu() may find no unit for this device */
	if (!iommu || !ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

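/* flush the device IOTLBs of every ATS-enabled device in the domain */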
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

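/*
 * Flush 'pages' pages starting at 'addr' for domain 'did'.  The mask
 * is the log2 of the page count rounded up to a power of two: e.g.
 * pages = 3 rounds up to 4, giving mask = 2, which invalidates four
 * naturally aligned pages.
 */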
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support
	 * or the size is too big.  PSI requires the region size to be
	 * 2 ^ x pages, with the base address naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);
	if (did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1206 | static struct dmar_domain *alloc_domain(void) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1207 | { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1208 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1209 | |
| 1210 | domain = alloc_domain_mem(); |
| 1211 | if (!domain) |
| 1212 | return NULL; |
| 1213 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1214 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
Weidong Han | d71a2f3 | 2008-12-07 21:13:41 +0800 | [diff] [blame] | 1215 | domain->flags = 0; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1216 | |
| 1217 | return domain; |
| 1218 | } |
| 1219 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1220 | static int iommu_attach_domain(struct dmar_domain *domain, |
| 1221 | struct intel_iommu *iommu) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1222 | { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1223 | int num; |
| 1224 | unsigned long ndomains; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1225 | unsigned long flags; |
| 1226 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1227 | ndomains = cap_ndoms(iommu->cap); |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1228 | |
| 1229 | spin_lock_irqsave(&iommu->lock, flags); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1230 | |
| 1231 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
| 1232 | if (num >= ndomains) { |
| 1233 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1234 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
| 1235 | return -ENOMEM; |
| 1236 | } |
| 1237 | |
| 1238 | domain->id = num; |
| 1239 | set_bit(num, iommu->domain_ids); |
| 1240 | set_bit(iommu->seq_id, &domain->iommu_bmp); |
| 1241 | iommu->domains[num] = domain; |
| 1242 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1243 | |
| 1244 | return 0; |
| 1245 | } |
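	/*
	 * Note: domain ids are per-IOMMU state. The same dmar_domain can
	 * occupy a different slot in each unit's domain_ids bitmap, while
	 * domain->iommu_bmp (indexed by iommu->seq_id) records which
	 * units the domain is currently attached to.
	 */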
| 1246 | |
| 1247 | static void iommu_detach_domain(struct dmar_domain *domain, |
| 1248 | struct intel_iommu *iommu) |
| 1249 | { |
| 1250 | unsigned long flags; |
| 1251 | int num, ndomains; |
| 1252 | int found = 0; |
| 1253 | |
| 1254 | spin_lock_irqsave(&iommu->lock, flags); |
| 1255 | ndomains = cap_ndoms(iommu->cap); |
| 1256 | num = find_first_bit(iommu->domain_ids, ndomains); |
| 1257 | for (; num < ndomains; ) { |
| 1258 | if (iommu->domains[num] == domain) { |
| 1259 | found = 1; |
| 1260 | break; |
| 1261 | } |
| 1262 | num = find_next_bit(iommu->domain_ids, |
| 1263 | cap_ndoms(iommu->cap), num+1); |
| 1264 | } |
| 1265 | |
| 1266 | if (found) { |
| 1267 | clear_bit(num, iommu->domain_ids); |
| 1268 | clear_bit(iommu->seq_id, &domain->iommu_bmp); |
| 1269 | iommu->domains[num] = NULL; |
| 1270 | } |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1271 | spin_unlock_irqrestore(&iommu->lock, flags); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1272 | } |
| 1273 | |
| 1274 | static struct iova_domain reserved_iova_list; |
Mark Gross | 8a443df | 2008-03-04 14:59:31 -0800 | [diff] [blame] | 1275 | static struct lock_class_key reserved_alloc_key; |
| 1276 | static struct lock_class_key reserved_rbtree_key; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1277 | |
| 1278 | static void dmar_init_reserved_ranges(void) |
| 1279 | { |
| 1280 | struct pci_dev *pdev = NULL; |
| 1281 | struct iova *iova; |
| 1282 | int i; |
| 1283 | u64 addr, size; |
| 1284 | |
David Miller | f661197 | 2008-02-06 01:36:23 -0800 | [diff] [blame] | 1285 | init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1286 | |
Mark Gross | 8a443df | 2008-03-04 14:59:31 -0800 | [diff] [blame] | 1287 | lockdep_set_class(&reserved_iova_list.iova_alloc_lock, |
| 1288 | &reserved_alloc_key); |
| 1289 | lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, |
| 1290 | &reserved_rbtree_key); |
| 1291 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1292 | /* IOAPIC ranges shouldn't be accessed by DMA */ |
| 1293 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), |
| 1294 | IOVA_PFN(IOAPIC_RANGE_END)); |
| 1295 | if (!iova) |
| 1296 | printk(KERN_ERR "Reserve IOAPIC range failed\n"); |
| 1297 | |
| 1298 | /* Reserve all PCI MMIO to avoid peer-to-peer access */ |
| 1299 | for_each_pci_dev(pdev) { |
| 1300 | struct resource *r; |
| 1301 | |
| 1302 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { |
| 1303 | r = &pdev->resource[i]; |
| 1304 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) |
| 1305 | continue; |
| 1306 | addr = r->start; |
David Woodhouse | fd18de5 | 2009-05-10 23:57:41 +0100 | [diff] [blame] | 1307 | addr &= PHYSICAL_PAGE_MASK; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1308 | size = r->end - addr; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1309 | size = PAGE_ALIGN(size); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1310 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), |
| 1311 | IOVA_PFN(size + addr) - 1); |
| 1312 | if (!iova) |
| 1313 | printk(KERN_ERR "Reserve iova failed\n"); |
| 1314 | } |
| 1315 | } |
| 1316 | |
| 1317 | } |
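	/*
	 * The reserved list built here is copied into every new domain's
	 * iova allocator (see domain_reserve_special_ranges below), so a
	 * DMA address can never collide with the IOAPIC window or with a
	 * PCI MMIO aperture that a peer device might decode.
	 */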
| 1318 | |
| 1319 | static void domain_reserve_special_ranges(struct dmar_domain *domain) |
| 1320 | { |
| 1321 | copy_reserved_iova(&reserved_iova_list, &domain->iovad); |
| 1322 | } |
| 1323 | |
| 1324 | static inline int guestwidth_to_adjustwidth(int gaw) |
| 1325 | { |
| 1326 | int agaw; |
| 1327 | int r = (gaw - 12) % 9; |
| 1328 | |
| 1329 | if (r == 0) |
| 1330 | agaw = gaw; |
| 1331 | else |
| 1332 | agaw = gaw + 9 - r; |
| 1333 | if (agaw > 64) |
| 1334 | agaw = 64; |
| 1335 | return agaw; |
| 1336 | } |
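	/*
	 * Each page-table level resolves 9 address bits on top of the
	 * 12-bit page offset, so the adjusted width is gaw rounded up to
	 * the next 12 + 9*n boundary. For example,
	 * guestwidth_to_adjustwidth(48) == 48 (r == 0), while
	 * guestwidth_to_adjustwidth(40) == 48 (r == 1, so 40 + 9 - 1),
	 * capped at 64.
	 */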
| 1337 | |
| 1338 | static int domain_init(struct dmar_domain *domain, int guest_width) |
| 1339 | { |
| 1340 | struct intel_iommu *iommu; |
| 1341 | int adjust_width, agaw; |
| 1342 | unsigned long sagaw; |
| 1343 | |
David Miller | f661197 | 2008-02-06 01:36:23 -0800 | [diff] [blame] | 1344 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1345 | spin_lock_init(&domain->mapping_lock); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1346 | spin_lock_init(&domain->iommu_lock); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1347 | |
| 1348 | domain_reserve_special_ranges(domain); |
| 1349 | |
| 1350 | /* calculate AGAW */ |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1351 | iommu = domain_get_iommu(domain); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1352 | if (guest_width > cap_mgaw(iommu->cap)) |
| 1353 | guest_width = cap_mgaw(iommu->cap); |
| 1354 | domain->gaw = guest_width; |
| 1355 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
| 1356 | agaw = width_to_agaw(adjust_width); |
| 1357 | sagaw = cap_sagaw(iommu->cap); |
| 1358 | if (!test_bit(agaw, &sagaw)) { |
| 1359 | /* hardware doesn't support it, choose a bigger one */ |
| 1360 | pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw); |
| 1361 | agaw = find_next_bit(&sagaw, 5, agaw); |
| 1362 | if (agaw >= 5) |
| 1363 | return -ENODEV; |
| 1364 | } |
| 1365 | domain->agaw = agaw; |
| 1366 | INIT_LIST_HEAD(&domain->devices); |
| 1367 | |
Weidong Han | 8e604097 | 2008-12-08 15:49:06 +0800 | [diff] [blame] | 1368 | if (ecap_coherent(iommu->ecap)) |
| 1369 | domain->iommu_coherency = 1; |
| 1370 | else |
| 1371 | domain->iommu_coherency = 0; |
| 1372 | |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 1373 | if (ecap_sc_support(iommu->ecap)) |
| 1374 | domain->iommu_snooping = 1; |
| 1375 | else |
| 1376 | domain->iommu_snooping = 0; |
| 1377 | |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1378 | domain->iommu_count = 1; |
| 1379 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1380 | /* always allocate the top pgd */ |
| 1381 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
| 1382 | if (!domain->pgd) |
| 1383 | return -ENOMEM; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1384 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1385 | return 0; |
| 1386 | } |
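	/*
	 * iommu_coherency and iommu_snooping start out mirroring the
	 * first attached unit's capabilities; domain_update_iommu_cap()
	 * recomputes them as more IOMMUs join, effectively taking the
	 * least capable common denominator.
	 */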
| 1387 | |
| 1388 | static void domain_exit(struct dmar_domain *domain) |
| 1389 | { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1390 | struct dmar_drhd_unit *drhd; |
| 1391 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1392 | u64 end; |
| 1393 | |
 | 1394 | /* Domain 0 is reserved, so don't process it */
| 1395 | if (!domain) |
| 1396 | return; |
| 1397 | |
| 1398 | domain_remove_dev_info(domain); |
| 1399 | /* destroy iovas */ |
| 1400 | put_iova_domain(&domain->iovad); |
| 1401 | end = DOMAIN_MAX_ADDR(domain->gaw); |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1402 | end = end & PAGE_MASK;
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1403 | |
| 1404 | /* clear ptes */ |
| 1405 | dma_pte_clear_range(domain, 0, end); |
| 1406 | |
| 1407 | /* free page tables */ |
| 1408 | dma_pte_free_pagetable(domain, 0, end); |
| 1409 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1410 | for_each_active_iommu(iommu, drhd) |
| 1411 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) |
| 1412 | iommu_detach_domain(domain, iommu); |
| 1413 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1414 | free_domain_mem(domain); |
| 1415 | } |
| 1416 | |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1417 | static int domain_context_mapping_one(struct dmar_domain *domain, int segment, |
| 1418 | u8 bus, u8 devfn, int translation) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1419 | { |
| 1420 | struct context_entry *context; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1421 | unsigned long flags; |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1422 | struct intel_iommu *iommu; |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1423 | struct dma_pte *pgd; |
| 1424 | unsigned long num; |
| 1425 | unsigned long ndomains; |
| 1426 | int id; |
| 1427 | int agaw; |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1428 | struct device_domain_info *info = NULL; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1429 | |
| 1430 | pr_debug("Set context mapping for %02x:%02x.%d\n", |
| 1431 | bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1432 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1433 | BUG_ON(!domain->pgd); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1434 | BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && |
| 1435 | translation != CONTEXT_TT_MULTI_LEVEL); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1436 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1437 | iommu = device_to_iommu(segment, bus, devfn); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1438 | if (!iommu) |
| 1439 | return -ENODEV; |
| 1440 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1441 | context = device_to_context_entry(iommu, bus, devfn); |
| 1442 | if (!context) |
| 1443 | return -ENOMEM; |
| 1444 | spin_lock_irqsave(&iommu->lock, flags); |
Mark McLoughlin | c07e7d2 | 2008-11-21 16:54:46 +0000 | [diff] [blame] | 1445 | if (context_present(context)) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1446 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1447 | return 0; |
| 1448 | } |
| 1449 | |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1450 | id = domain->id; |
| 1451 | pgd = domain->pgd; |
| 1452 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1453 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 1454 | domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1455 | int found = 0; |
| 1456 | |
| 1457 | /* find an available domain id for this device in iommu */ |
| 1458 | ndomains = cap_ndoms(iommu->cap); |
| 1459 | num = find_first_bit(iommu->domain_ids, ndomains); |
| 1460 | for (; num < ndomains; ) { |
| 1461 | if (iommu->domains[num] == domain) { |
| 1462 | id = num; |
| 1463 | found = 1; |
| 1464 | break; |
| 1465 | } |
| 1466 | num = find_next_bit(iommu->domain_ids, |
| 1467 | cap_ndoms(iommu->cap), num+1); |
| 1468 | } |
| 1469 | |
| 1470 | if (found == 0) { |
| 1471 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
| 1472 | if (num >= ndomains) { |
| 1473 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1474 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
| 1475 | return -EFAULT; |
| 1476 | } |
| 1477 | |
| 1478 | set_bit(num, iommu->domain_ids); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1479 | set_bit(iommu->seq_id, &domain->iommu_bmp); |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1480 | iommu->domains[num] = domain; |
| 1481 | id = num; |
| 1482 | } |
| 1483 | |
 | 1484 | /* Skip top levels of page tables for
 | 1485 | * IOMMUs whose agaw is smaller than the domain's default.
 | 1486 | */
| 1487 | for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { |
| 1488 | pgd = phys_to_virt(dma_pte_addr(pgd)); |
| 1489 | if (!dma_pte_present(pgd)) { |
| 1490 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1491 | return -ENOMEM; |
| 1492 | } |
| 1493 | } |
| 1494 | } |
| 1495 | |
| 1496 | context_set_domain_id(context, id); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1497 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1498 | if (translation != CONTEXT_TT_PASS_THROUGH) { |
| 1499 | info = iommu_support_dev_iotlb(domain, segment, bus, devfn); |
| 1500 | translation = info ? CONTEXT_TT_DEV_IOTLB : |
| 1501 | CONTEXT_TT_MULTI_LEVEL; |
| 1502 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1503 | /* |
 | 1504 | * In pass-through mode, AW must be programmed to indicate the largest
 | 1505 | * AGAW value supported by hardware, and ASR is ignored by hardware.
| 1506 | */ |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1507 | if (unlikely(translation == CONTEXT_TT_PASS_THROUGH)) |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1508 | context_set_address_width(context, iommu->msagaw); |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1509 | else { |
| 1510 | context_set_address_root(context, virt_to_phys(pgd)); |
| 1511 | context_set_address_width(context, iommu->agaw); |
| 1512 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1513 | |
| 1514 | context_set_translation_type(context, translation); |
Mark McLoughlin | c07e7d2 | 2008-11-21 16:54:46 +0000 | [diff] [blame] | 1515 | context_set_fault_enable(context); |
| 1516 | context_set_present(context); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1517 | domain_flush_cache(domain, context, sizeof(*context)); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1518 | |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1519 | /* |
| 1520 | * It's a non-present to present mapping. If hardware doesn't cache |
 | 1521 | * non-present entries we only need to flush the write-buffer. If it
| 1522 | * _does_ cache non-present entries, then it does so in the special |
| 1523 | * domain #0, which we have to flush: |
| 1524 | */ |
| 1525 | if (cap_caching_mode(iommu->cap)) { |
| 1526 | iommu->flush.flush_context(iommu, 0, |
| 1527 | (((u16)bus) << 8) | devfn, |
| 1528 | DMA_CCMD_MASK_NOBIT, |
| 1529 | DMA_CCMD_DEVICE_INVL); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1530 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1531 | } else { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1532 | iommu_flush_write_buffer(iommu); |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1533 | } |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1534 | iommu_enable_dev_iotlb(info); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1535 | spin_unlock_irqrestore(&iommu->lock, flags); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1536 | |
| 1537 | spin_lock_irqsave(&domain->iommu_lock, flags); |
| 1538 | if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { |
| 1539 | domain->iommu_count++; |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 1540 | domain_update_iommu_cap(domain); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1541 | } |
| 1542 | spin_unlock_irqrestore(&domain->iommu_lock, flags); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1543 | return 0; |
| 1544 | } |
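	/*
	 * Note: for VM and static-identity domains the context entry may
	 * carry a different domain id on each IOMMU (found or allocated
	 * in the block above), while ordinary DMA-API domains simply
	 * reuse domain->id, allocated on the unit that owns the device.
	 */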
| 1545 | |
| 1546 | static int |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1547 | domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, |
| 1548 | int translation) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1549 | { |
| 1550 | int ret; |
| 1551 | struct pci_dev *tmp, *parent; |
| 1552 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1553 | ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1554 | pdev->bus->number, pdev->devfn, |
| 1555 | translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1556 | if (ret) |
| 1557 | return ret; |
| 1558 | |
| 1559 | /* dependent device mapping */ |
| 1560 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1561 | if (!tmp) |
| 1562 | return 0; |
| 1563 | /* Secondary interface's bus number and devfn 0 */ |
| 1564 | parent = pdev->bus->self; |
| 1565 | while (parent != tmp) { |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1566 | ret = domain_context_mapping_one(domain, |
| 1567 | pci_domain_nr(parent->bus), |
| 1568 | parent->bus->number, |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1569 | parent->devfn, translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1570 | if (ret) |
| 1571 | return ret; |
| 1572 | parent = parent->bus->self; |
| 1573 | } |
| 1574 | if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ |
| 1575 | return domain_context_mapping_one(domain, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1576 | pci_domain_nr(tmp->subordinate), |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1577 | tmp->subordinate->number, 0, |
| 1578 | translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1579 | else /* this is a legacy PCI bridge */ |
| 1580 | return domain_context_mapping_one(domain, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1581 | pci_domain_nr(tmp->bus), |
| 1582 | tmp->bus->number, |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1583 | tmp->devfn, |
| 1584 | translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1585 | } |
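	/*
	 * Context entries are programmed for the device itself, for every
	 * PCIe bridge on the path to the root, and for a PCIe-to-PCI
	 * bridge's secondary bus with devfn 0: conventional PCI devices
	 * behind such a bridge all appear to the IOMMU under the bridge's
	 * requester id.
	 */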
| 1586 | |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1587 | static int domain_context_mapped(struct pci_dev *pdev) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1588 | { |
| 1589 | int ret; |
| 1590 | struct pci_dev *tmp, *parent; |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1591 | struct intel_iommu *iommu; |
| 1592 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1593 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, |
| 1594 | pdev->devfn); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1595 | if (!iommu) |
| 1596 | return -ENODEV; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1597 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1598 | ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1599 | if (!ret) |
| 1600 | return ret; |
| 1601 | /* dependent device mapping */ |
| 1602 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1603 | if (!tmp) |
| 1604 | return ret; |
| 1605 | /* Secondary interface's bus number and devfn 0 */ |
| 1606 | parent = pdev->bus->self; |
| 1607 | while (parent != tmp) { |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1608 | ret = device_context_mapped(iommu, parent->bus->number, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1609 | parent->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1610 | if (!ret) |
| 1611 | return ret; |
| 1612 | parent = parent->bus->self; |
| 1613 | } |
| 1614 | if (tmp->is_pcie) |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1615 | return device_context_mapped(iommu, tmp->subordinate->number, |
| 1616 | 0); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1617 | else |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1618 | return device_context_mapped(iommu, tmp->bus->number, |
| 1619 | tmp->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1620 | } |
| 1621 | |
| 1622 | static int |
| 1623 | domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, |
| 1624 | u64 hpa, size_t size, int prot) |
| 1625 | { |
| 1626 | u64 start_pfn, end_pfn; |
| 1627 | struct dma_pte *pte; |
| 1628 | int index; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1629 | int addr_width = agaw_to_width(domain->agaw); |
| 1630 | |
| 1631 | hpa &= (((u64)1) << addr_width) - 1; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1632 | |
| 1633 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
| 1634 | return -EINVAL; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1635 | iova &= PAGE_MASK; |
| 1636 | start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; |
| 1637 | end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1638 | index = 0; |
| 1639 | while (start_pfn < end_pfn) { |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1640 | pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1641 | if (!pte) |
| 1642 | return -ENOMEM; |
 | 1643 | /* We don't need a lock here; nobody else
 | 1644 | * touches this iova range
| 1645 | */ |
Mark McLoughlin | 19c239c | 2008-11-21 16:56:53 +0000 | [diff] [blame] | 1646 | BUG_ON(dma_pte_addr(pte)); |
| 1647 | dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); |
| 1648 | dma_set_pte_prot(pte, prot); |
Sheng Yang | 9cf0669 | 2009-03-18 15:33:07 +0800 | [diff] [blame] | 1649 | if (prot & DMA_PTE_SNP) |
| 1650 | dma_set_pte_snp(pte); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1651 | domain_flush_cache(domain, pte, sizeof(*pte)); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1652 | start_pfn++; |
| 1653 | index++; |
| 1654 | } |
| 1655 | return 0; |
| 1656 | } |
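	/*
	 * Example: domain_page_mapping(domain, 0x100000, 0x200000, 0x3000,
	 * prot) writes three PTEs, mapping iova pages 0x100-0x102 to host
	 * pfns 0x200-0x202 (sub-page offsets are masked off).
	 */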
| 1657 | |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1658 | static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1659 | { |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1660 | if (!iommu) |
| 1661 | return; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1662 | |
| 1663 | clear_context_table(iommu, bus, devfn); |
| 1664 | iommu->flush.flush_context(iommu, 0, 0, 0, |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1665 | DMA_CCMD_GLOBAL_INVL); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1666 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1667 | } |
| 1668 | |
| 1669 | static void domain_remove_dev_info(struct dmar_domain *domain) |
| 1670 | { |
| 1671 | struct device_domain_info *info; |
| 1672 | unsigned long flags; |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1673 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1674 | |
| 1675 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1676 | while (!list_empty(&domain->devices)) { |
| 1677 | info = list_entry(domain->devices.next, |
| 1678 | struct device_domain_info, link); |
| 1679 | list_del(&info->link); |
| 1680 | list_del(&info->global); |
| 1681 | if (info->dev) |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1682 | info->dev->dev.archdata.iommu = NULL; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1683 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1684 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1685 | iommu_disable_dev_iotlb(info); |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1686 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1687 | iommu_detach_dev(iommu, info->bus, info->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1688 | free_devinfo_mem(info); |
| 1689 | |
| 1690 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1691 | } |
| 1692 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1693 | } |
| 1694 | |
| 1695 | /* |
| 1696 | * find_domain |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1697 | * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1698 | */ |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 1699 | static struct dmar_domain * |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1700 | find_domain(struct pci_dev *pdev) |
| 1701 | { |
| 1702 | struct device_domain_info *info; |
| 1703 | |
 | 1704 | /* No lock here; we assume no domain exit in the normal case */
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1705 | info = pdev->dev.archdata.iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1706 | if (info) |
| 1707 | return info->domain; |
| 1708 | return NULL; |
| 1709 | } |
| 1710 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1711 | /* domain is initialized */ |
| 1712 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) |
| 1713 | { |
| 1714 | struct dmar_domain *domain, *found = NULL; |
| 1715 | struct intel_iommu *iommu; |
| 1716 | struct dmar_drhd_unit *drhd; |
| 1717 | struct device_domain_info *info, *tmp; |
| 1718 | struct pci_dev *dev_tmp; |
| 1719 | unsigned long flags; |
| 1720 | int bus = 0, devfn = 0; |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1721 | int segment; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1722 | int ret; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1723 | |
| 1724 | domain = find_domain(pdev); |
| 1725 | if (domain) |
| 1726 | return domain; |
| 1727 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1728 | segment = pci_domain_nr(pdev->bus); |
| 1729 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1730 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1731 | if (dev_tmp) { |
| 1732 | if (dev_tmp->is_pcie) { |
| 1733 | bus = dev_tmp->subordinate->number; |
| 1734 | devfn = 0; |
| 1735 | } else { |
| 1736 | bus = dev_tmp->bus->number; |
| 1737 | devfn = dev_tmp->devfn; |
| 1738 | } |
| 1739 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1740 | list_for_each_entry(info, &device_domain_list, global) { |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1741 | if (info->segment == segment && |
| 1742 | info->bus == bus && info->devfn == devfn) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1743 | found = info->domain; |
| 1744 | break; |
| 1745 | } |
| 1746 | } |
| 1747 | spin_unlock_irqrestore(&device_domain_lock, flags); |
 | 1748 | /* the pcie-pci bridge already has a domain, use it */
| 1749 | if (found) { |
| 1750 | domain = found; |
| 1751 | goto found_domain; |
| 1752 | } |
| 1753 | } |
| 1754 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1755 | domain = alloc_domain(); |
| 1756 | if (!domain) |
| 1757 | goto error; |
| 1758 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1759 | /* Allocate new domain for the device */ |
| 1760 | drhd = dmar_find_matched_drhd_unit(pdev); |
| 1761 | if (!drhd) { |
| 1762 | printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", |
| 1763 | pci_name(pdev)); |
| 1764 | return NULL; |
| 1765 | } |
| 1766 | iommu = drhd->iommu; |
| 1767 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1768 | ret = iommu_attach_domain(domain, iommu); |
| 1769 | if (ret) { |
| 1770 | domain_exit(domain); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1771 | goto error; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1772 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1773 | |
| 1774 | if (domain_init(domain, gaw)) { |
| 1775 | domain_exit(domain); |
| 1776 | goto error; |
| 1777 | } |
| 1778 | |
| 1779 | /* register pcie-to-pci device */ |
| 1780 | if (dev_tmp) { |
| 1781 | info = alloc_devinfo_mem(); |
| 1782 | if (!info) { |
| 1783 | domain_exit(domain); |
| 1784 | goto error; |
| 1785 | } |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1786 | info->segment = segment; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1787 | info->bus = bus; |
| 1788 | info->devfn = devfn; |
| 1789 | info->dev = NULL; |
| 1790 | info->domain = domain; |
 | 1791 | /* This domain is shared by devices under a p2p bridge */
Weidong Han | 3b5410e | 2008-12-08 09:17:15 +0800 | [diff] [blame] | 1792 | domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1793 | |
 | 1794 | /* the pcie-to-pci bridge already has a domain, use it */
| 1795 | found = NULL; |
| 1796 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1797 | list_for_each_entry(tmp, &device_domain_list, global) { |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1798 | if (tmp->segment == segment && |
| 1799 | tmp->bus == bus && tmp->devfn == devfn) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1800 | found = tmp->domain; |
| 1801 | break; |
| 1802 | } |
| 1803 | } |
| 1804 | if (found) { |
| 1805 | free_devinfo_mem(info); |
| 1806 | domain_exit(domain); |
| 1807 | domain = found; |
| 1808 | } else { |
| 1809 | list_add(&info->link, &domain->devices); |
| 1810 | list_add(&info->global, &device_domain_list); |
| 1811 | } |
| 1812 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1813 | } |
| 1814 | |
| 1815 | found_domain: |
| 1816 | info = alloc_devinfo_mem(); |
| 1817 | if (!info) |
| 1818 | goto error; |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1819 | info->segment = segment; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1820 | info->bus = pdev->bus->number; |
| 1821 | info->devfn = pdev->devfn; |
| 1822 | info->dev = pdev; |
| 1823 | info->domain = domain; |
| 1824 | spin_lock_irqsave(&device_domain_lock, flags); |
 | 1825 | /* somebody else raced us and set it up first */
| 1826 | found = find_domain(pdev); |
| 1827 | if (found != NULL) { |
| 1828 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1829 | if (found != domain) { |
| 1830 | domain_exit(domain); |
| 1831 | domain = found; |
| 1832 | } |
| 1833 | free_devinfo_mem(info); |
| 1834 | return domain; |
| 1835 | } |
| 1836 | list_add(&info->link, &domain->devices); |
| 1837 | list_add(&info->global, &device_domain_list); |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1838 | pdev->dev.archdata.iommu = info; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1839 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1840 | return domain; |
| 1841 | error: |
 | 1842 | /* re-check here; another thread may have set it meanwhile */
| 1843 | return find_domain(pdev); |
| 1844 | } |
| 1845 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1846 | static int iommu_identity_mapping; |
| 1847 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1848 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
| 1849 | unsigned long long start, |
| 1850 | unsigned long long end) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1851 | { |
| 1852 | struct dmar_domain *domain; |
| 1853 | unsigned long size; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1854 | unsigned long long base; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1855 | int ret; |
| 1856 | |
| 1857 | printk(KERN_INFO |
| 1858 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
| 1859 | pci_name(pdev), start, end); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 1860 | if (iommu_identity_mapping) |
| 1861 | domain = si_domain; |
| 1862 | else |
| 1863 | /* page table init */ |
| 1864 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1865 | if (!domain) |
| 1866 | return -ENOMEM; |
| 1867 | |
| 1868 | /* The address might not be aligned */ |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1869 | base = start & PAGE_MASK; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1870 | size = end - base; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1871 | size = PAGE_ALIGN(size); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1872 | if (!reserve_iova(&domain->iovad, IOVA_PFN(base), |
| 1873 | IOVA_PFN(base + size) - 1)) { |
| 1874 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); |
| 1875 | ret = -ENOMEM; |
| 1876 | goto error; |
| 1877 | } |
| 1878 | |
| 1879 | pr_debug("Mapping reserved region %lx@%llx for %s\n", |
| 1880 | size, base, pci_name(pdev)); |
| 1881 | /* |
 | 1882 | * The RMRR range might overlap a physical memory range,
 | 1883 | * so clear any existing mappings first
| 1884 | */ |
| 1885 | dma_pte_clear_range(domain, base, base + size); |
| 1886 | |
| 1887 | ret = domain_page_mapping(domain, base, base, size, |
| 1888 | DMA_PTE_READ|DMA_PTE_WRITE); |
| 1889 | if (ret) |
| 1890 | goto error; |
| 1891 | |
| 1892 | /* context entry init */ |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1893 | ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1894 | if (!ret) |
| 1895 | return 0; |
| 1896 | error: |
| 1897 | domain_exit(domain); |
| 1898 | return ret; |
| 1899 | |
| 1900 | } |
| 1901 | |
| 1902 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, |
| 1903 | struct pci_dev *pdev) |
| 1904 | { |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1905 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1906 | return 0; |
| 1907 | return iommu_prepare_identity_map(pdev, rmrr->base_address, |
| 1908 | rmrr->end_address + 1); |
| 1909 | } |
| 1910 | |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 1911 | #ifdef CONFIG_DMAR_GFX_WA |
Yinghai Lu | d52d53b | 2008-06-16 20:10:55 -0700 | [diff] [blame] | 1912 | struct iommu_prepare_data { |
| 1913 | struct pci_dev *pdev; |
| 1914 | int ret; |
| 1915 | }; |
| 1916 | |
| 1917 | static int __init iommu_prepare_work_fn(unsigned long start_pfn, |
| 1918 | unsigned long end_pfn, void *datax) |
| 1919 | { |
| 1920 | struct iommu_prepare_data *data; |
| 1921 | |
| 1922 | data = (struct iommu_prepare_data *)datax; |
| 1923 | |
| 1924 | data->ret = iommu_prepare_identity_map(data->pdev, |
| 1925 | start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); |
| 1926 | return data->ret; |
| 1927 | |
| 1928 | } |
| 1929 | |
| 1930 | static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev) |
| 1931 | { |
| 1932 | int nid; |
| 1933 | struct iommu_prepare_data data; |
| 1934 | |
| 1935 | data.pdev = pdev; |
| 1936 | data.ret = 0; |
| 1937 | |
| 1938 | for_each_online_node(nid) { |
| 1939 | work_with_active_regions(nid, iommu_prepare_work_fn, &data); |
| 1940 | if (data.ret) |
| 1941 | return data.ret; |
| 1942 | } |
| 1943 | return data.ret; |
| 1944 | } |
| 1945 | |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 1946 | static void __init iommu_prepare_gfx_mapping(void) |
| 1947 | { |
| 1948 | struct pci_dev *pdev = NULL; |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 1949 | int ret; |
| 1950 | |
| 1951 | for_each_pci_dev(pdev) { |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1952 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO || |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 1953 | !IS_GFX_DEVICE(pdev)) |
| 1954 | continue; |
| 1955 | printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n", |
| 1956 | pci_name(pdev)); |
Yinghai Lu | d52d53b | 2008-06-16 20:10:55 -0700 | [diff] [blame] | 1957 | ret = iommu_prepare_with_active_regions(pdev); |
| 1958 | if (ret) |
| 1959 | printk(KERN_ERR "IOMMU: mapping reserved region failed\n"); |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 1960 | } |
| 1961 | } |
Mark McLoughlin | 2abd7e1 | 2008-11-20 15:49:50 +0000 | [diff] [blame] | 1962 | #else /* !CONFIG_DMAR_GFX_WA */ |
| 1963 | static inline void iommu_prepare_gfx_mapping(void) |
| 1964 | { |
| 1965 | return; |
| 1966 | } |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 1967 | #endif |
| 1968 | |
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 1969 | #ifdef CONFIG_DMAR_FLOPPY_WA |
| 1970 | static inline void iommu_prepare_isa(void) |
| 1971 | { |
| 1972 | struct pci_dev *pdev; |
| 1973 | int ret; |
| 1974 | |
| 1975 | pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); |
| 1976 | if (!pdev) |
| 1977 | return; |
| 1978 | |
| 1979 | printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n"); |
| 1980 | ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); |
| 1981 | |
| 1982 | if (ret) |
Frank Seidel | 1c35b8e | 2009-02-06 10:23:36 +0100 | [diff] [blame] | 1983 | printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 1984 | "floppy might not work\n"); |
| 1985 | |
| 1986 | } |
| 1987 | #else |
| 1988 | static inline void iommu_prepare_isa(void) |
| 1989 | { |
| 1990 | return; |
| 1991 | } |
 | 1992 | #endif /* !CONFIG_DMAR_FLOPPY_WA */
| 1993 | |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1994 | /* Initialize each context entry as pass-through. */
| 1995 | static int __init init_context_pass_through(void) |
| 1996 | { |
| 1997 | struct pci_dev *pdev = NULL; |
| 1998 | struct dmar_domain *domain; |
| 1999 | int ret; |
| 2000 | |
| 2001 | for_each_pci_dev(pdev) { |
| 2002 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
| 2003 | ret = domain_context_mapping(domain, pdev, |
| 2004 | CONTEXT_TT_PASS_THROUGH); |
| 2005 | if (ret) |
| 2006 | return ret; |
| 2007 | } |
| 2008 | return 0; |
| 2009 | } |
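	/*
	 * In pass-through mode the context entry carries no page table:
	 * ASR is ignored and DMA addresses are used directly as host
	 * physical addresses, so each device still needs a context entry
	 * but no per-page mappings.
	 */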
| 2010 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2011 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
| 2012 | static int si_domain_init(void) |
| 2013 | { |
| 2014 | struct dmar_drhd_unit *drhd; |
| 2015 | struct intel_iommu *iommu; |
| 2016 | int ret = 0; |
| 2017 | |
| 2018 | si_domain = alloc_domain(); |
| 2019 | if (!si_domain) |
| 2020 | return -EFAULT; |
| 2021 | |
| 2022 | |
| 2023 | for_each_active_iommu(iommu, drhd) { |
| 2024 | ret = iommu_attach_domain(si_domain, iommu); |
| 2025 | if (ret) { |
| 2026 | domain_exit(si_domain); |
| 2027 | return -EFAULT; |
| 2028 | } |
| 2029 | } |
| 2030 | |
| 2031 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
| 2032 | domain_exit(si_domain); |
| 2033 | return -EFAULT; |
| 2034 | } |
| 2035 | |
| 2036 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; |
| 2037 | |
| 2038 | return 0; |
| 2039 | } |
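	/*
	 * si_domain is a single domain attached to every active IOMMU;
	 * all devices using the static identity map share it instead of
	 * each getting a private domain.
	 */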
| 2040 | |
| 2041 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
| 2042 | struct pci_dev *pdev); |
| 2043 | static int identity_mapping(struct pci_dev *pdev) |
| 2044 | { |
| 2045 | struct device_domain_info *info; |
| 2046 | |
| 2047 | if (likely(!iommu_identity_mapping)) |
| 2048 | return 0; |
| 2049 | |
| 2050 | |
| 2051 | list_for_each_entry(info, &si_domain->devices, link) |
| 2052 | if (info->dev == pdev) |
| 2053 | return 1; |
| 2054 | return 0; |
| 2055 | } |
| 2056 | |
| 2057 | static int domain_add_dev_info(struct dmar_domain *domain, |
| 2058 | struct pci_dev *pdev) |
| 2059 | { |
| 2060 | struct device_domain_info *info; |
| 2061 | unsigned long flags; |
| 2062 | |
| 2063 | info = alloc_devinfo_mem(); |
| 2064 | if (!info) |
| 2065 | return -ENOMEM; |
| 2066 | |
| 2067 | info->segment = pci_domain_nr(pdev->bus); |
| 2068 | info->bus = pdev->bus->number; |
| 2069 | info->devfn = pdev->devfn; |
| 2070 | info->dev = pdev; |
| 2071 | info->domain = domain; |
| 2072 | |
| 2073 | spin_lock_irqsave(&device_domain_lock, flags); |
| 2074 | list_add(&info->link, &domain->devices); |
| 2075 | list_add(&info->global, &device_domain_list); |
| 2076 | pdev->dev.archdata.iommu = info; |
| 2077 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 2078 | |
| 2079 | return 0; |
| 2080 | } |
| 2081 | |
| 2082 | static int iommu_prepare_static_identity_mapping(void) |
| 2083 | { |
| 2084 | int i; |
| 2085 | struct pci_dev *pdev = NULL; |
| 2086 | int ret; |
| 2087 | |
| 2088 | ret = si_domain_init(); |
| 2089 | if (ret) |
| 2090 | return -EFAULT; |
| 2091 | |
| 2092 | printk(KERN_INFO "IOMMU: Setting identity map:\n"); |
| 2093 | for_each_pci_dev(pdev) { |
| 2094 | for (i = 0; i < e820.nr_map; i++) { |
| 2095 | struct e820entry *ei = &e820.map[i]; |
| 2096 | |
| 2097 | if (ei->type == E820_RAM) { |
| 2098 | ret = iommu_prepare_identity_map(pdev, |
| 2099 | ei->addr, ei->addr + ei->size); |
| 2100 | if (ret) { |
 | 2101 | printk(KERN_ERR "IOMMU: 1:1 mapping to si_domain failed.\n");
| 2102 | return -EFAULT; |
| 2103 | } |
| 2104 | } |
| 2105 | } |
| 2106 | ret = domain_add_dev_info(si_domain, pdev); |
| 2107 | if (ret) |
| 2108 | return ret; |
| 2109 | } |
| 2110 | |
| 2111 | return 0; |
| 2112 | } |
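	/*
	 * The static identity map covers every E820_RAM region for every
	 * PCI device, so DMA to any RAM address translates 1:1 while
	 * holes (MMIO, reserved ranges) remain unmapped and fault.
	 */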
| 2113 | |
| 2114 | int __init init_dmars(void) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2115 | { |
| 2116 | struct dmar_drhd_unit *drhd; |
| 2117 | struct dmar_rmrr_unit *rmrr; |
| 2118 | struct pci_dev *pdev; |
| 2119 | struct intel_iommu *iommu; |
Suresh Siddha | 9d783ba | 2009-03-16 17:04:55 -0700 | [diff] [blame] | 2120 | int i, ret; |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2121 | int pass_through = 1; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2122 | |
| 2123 | /* |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2124 | * If pass-through cannot be enabled, the IOMMU falls back to identity
 | 2125 | * mapping.
| 2126 | */ |
| 2127 | if (iommu_pass_through) |
| 2128 | iommu_identity_mapping = 1; |
| 2129 | |
| 2130 | /* |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2131 | * for each drhd |
| 2132 | * allocate root |
| 2133 | * initialize and program root entry to not present |
| 2134 | * endfor |
| 2135 | */ |
| 2136 | for_each_drhd_unit(drhd) { |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2137 | g_num_of_iommus++; |
| 2138 | /* |
 | 2139 | * No lock needed: this is only incremented in the single-
 | 2140 | * threaded kernel __init code path; all other accesses are
 | 2141 | * read-only
| 2142 | */ |
| 2143 | } |
| 2144 | |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2145 | g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), |
| 2146 | GFP_KERNEL); |
| 2147 | if (!g_iommus) { |
| 2148 | printk(KERN_ERR "Allocating global iommu array failed\n"); |
| 2149 | ret = -ENOMEM; |
| 2150 | goto error; |
| 2151 | } |
| 2152 | |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2153 | deferred_flush = kzalloc(g_num_of_iommus * |
| 2154 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
| 2155 | if (!deferred_flush) { |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2156 | kfree(g_iommus); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2157 | ret = -ENOMEM; |
| 2158 | goto error; |
| 2159 | } |
| 2160 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2161 | for_each_drhd_unit(drhd) { |
| 2162 | if (drhd->ignored) |
| 2163 | continue; |
Suresh Siddha | 1886e8a | 2008-07-10 11:16:37 -0700 | [diff] [blame] | 2164 | |
| 2165 | iommu = drhd->iommu; |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2166 | g_iommus[iommu->seq_id] = iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2167 | |
Suresh Siddha | e61d98d | 2008-07-10 11:16:35 -0700 | [diff] [blame] | 2168 | ret = iommu_init_domains(iommu); |
| 2169 | if (ret) |
| 2170 | goto error; |
| 2171 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2172 | /* |
| 2173 | * TBD: |
| 2174 | * we could share the same root & context tables |
 | 2175 | * among all IOMMUs. Need to split it later.
| 2176 | */ |
| 2177 | ret = iommu_alloc_root_entry(iommu); |
| 2178 | if (ret) { |
| 2179 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); |
| 2180 | goto error; |
| 2181 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2182 | if (!ecap_pass_through(iommu->ecap)) |
| 2183 | pass_through = 0; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2184 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2185 | if (iommu_pass_through) |
| 2186 | if (!pass_through) { |
| 2187 | printk(KERN_INFO |
| 2188 | "Pass Through is not supported by hardware.\n"); |
| 2189 | iommu_pass_through = 0; |
| 2190 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2191 | |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 2192 | /* |
 | 2193 | * Start from a sane IOMMU hardware state.
| 2194 | */ |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2195 | for_each_drhd_unit(drhd) { |
| 2196 | if (drhd->ignored) |
| 2197 | continue; |
| 2198 | |
| 2199 | iommu = drhd->iommu; |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 2200 | |
| 2201 | /* |
 | 2202 | * If queued invalidation was already initialized by us
 | 2203 | * (for example, while enabling interrupt remapping), then
 | 2204 | * things are already rolling from a sane state.
| 2205 | */ |
| 2206 | if (iommu->qi) |
| 2207 | continue; |
| 2208 | |
| 2209 | /* |
| 2210 | * Clear any previous faults. |
| 2211 | */ |
| 2212 | dmar_fault(-1, iommu); |
| 2213 | /* |
| 2214 | * Disable queued invalidation if supported and already enabled |
| 2215 | * before OS handover. |
| 2216 | */ |
| 2217 | dmar_disable_qi(iommu); |
| 2218 | } |
| 2219 | |
| 2220 | for_each_drhd_unit(drhd) { |
| 2221 | if (drhd->ignored) |
| 2222 | continue; |
| 2223 | |
| 2224 | iommu = drhd->iommu; |
| 2225 | |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2226 | if (dmar_enable_qi(iommu)) { |
| 2227 | /* |
| 2228 | * Queued Invalidate not enabled, use Register Based |
| 2229 | * Invalidate |
| 2230 | */ |
| 2231 | iommu->flush.flush_context = __iommu_flush_context; |
| 2232 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; |
| 2233 | printk(KERN_INFO "IOMMU 0x%Lx: using Register based " |
FUJITA Tomonori | b4e0f9e | 2008-11-19 13:53:42 +0900 | [diff] [blame] | 2234 | "invalidation\n", |
| 2235 | (unsigned long long)drhd->reg_base_addr); |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2236 | } else { |
| 2237 | iommu->flush.flush_context = qi_flush_context; |
| 2238 | iommu->flush.flush_iotlb = qi_flush_iotlb; |
| 2239 | printk(KERN_INFO "IOMMU 0x%Lx: using Queued " |
FUJITA Tomonori | b4e0f9e | 2008-11-19 13:53:42 +0900 | [diff] [blame] | 2240 | "invalidation\n", |
| 2241 | (unsigned long long)drhd->reg_base_addr); |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2242 | } |
| 2243 | } |
| 2244 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2245 | /* |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2246 | * If pass-through is set and enabled, context entries of all PCI
 | 2247 | * devices are initialized with the pass-through translation type.
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2248 | */ |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2249 | if (iommu_pass_through) { |
| 2250 | ret = init_context_pass_through(); |
| 2251 | if (ret) { |
| 2252 | printk(KERN_ERR "IOMMU: Pass through init failed.\n"); |
| 2253 | iommu_pass_through = 0; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2254 | } |
| 2255 | } |
| 2256 | |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2257 | /* |
 | 2258 | * If pass-through is not set or not enabled, set up context entries with
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2259 | * identity mappings for rmrr, gfx and isa, falling back to the static
 | 2260 | * identity mapping if iommu_identity_mapping is set.
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2261 | */ |
| 2262 | if (!iommu_pass_through) { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2263 | if (iommu_identity_mapping) |
| 2264 | iommu_prepare_static_identity_mapping(); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2265 | /* |
| 2266 | * For each rmrr |
| 2267 | * for each dev attached to rmrr |
| 2268 | * do |
| 2269 | * locate drhd for dev, alloc domain for dev |
| 2270 | * allocate free domain |
| 2271 | * allocate page table entries for rmrr |
| 2272 | * if context not allocated for bus |
| 2273 | * allocate and init context |
| 2274 | * set present in root table for this bus |
| 2275 | * init context with domain, translation etc |
| 2276 | * endfor |
| 2277 | * endfor |
| 2278 | */ |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2279 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2280 | for_each_rmrr_units(rmrr) { |
| 2281 | for (i = 0; i < rmrr->devices_cnt; i++) { |
| 2282 | pdev = rmrr->devices[i]; |
| 2283 | /* |
 | 2284 | * some BIOSes list non-existent devices in the
 | 2285 | * DMAR table.
| 2286 | */ |
| 2287 | if (!pdev) |
| 2288 | continue; |
| 2289 | ret = iommu_prepare_rmrr_dev(rmrr, pdev); |
| 2290 | if (ret) |
| 2291 | printk(KERN_ERR |
| 2292 | "IOMMU: mapping reserved region failed\n"); |
| 2293 | } |
| 2294 | } |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 2295 | |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2296 | iommu_prepare_gfx_mapping(); |
| 2297 | |
| 2298 | iommu_prepare_isa(); |
| 2299 | } |
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 2300 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2301 | /* |
| 2302 | * for each drhd |
| 2303 | * enable fault log |
| 2304 | * global invalidate context cache |
| 2305 | * global invalidate iotlb |
| 2306 | * enable translation |
| 2307 | */ |
| 2308 | for_each_drhd_unit(drhd) { |
| 2309 | if (drhd->ignored) |
| 2310 | continue; |
| 2311 | iommu = drhd->iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2312 | |
| 2313 | iommu_flush_write_buffer(iommu); |
| 2314 | |
Keshavamurthy, Anil S | 3460a6d | 2007-10-21 16:41:54 -0700 | [diff] [blame] | 2315 | ret = dmar_set_interrupt(iommu); |
| 2316 | if (ret) |
| 2317 | goto error; |
| 2318 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2319 | iommu_set_root_entry(iommu); |
| 2320 | |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 2321 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2322 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); |
mark gross | f8bab73 | 2008-02-08 04:18:38 -0800 | [diff] [blame] | 2323 | iommu_disable_protect_mem_regions(iommu); |
| 2324 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2325 | ret = iommu_enable_translation(iommu); |
| 2326 | if (ret) |
| 2327 | goto error; |
| 2328 | } |
| 2329 | |
| 2330 | return 0; |
| 2331 | error: |
| 2332 | for_each_drhd_unit(drhd) { |
| 2333 | if (drhd->ignored) |
| 2334 | continue; |
| 2335 | iommu = drhd->iommu; |
| 2336 | free_iommu(iommu); |
| 2337 | } |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2338 | kfree(g_iommus); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2339 | return ret; |
| 2340 | } |
| 2341 | |
| 2342 | static inline u64 aligned_size(u64 host_addr, size_t size) |
| 2343 | { |
| 2344 | u64 addr; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2345 | addr = (host_addr & (~PAGE_MASK)) + size; |
| 2346 | return PAGE_ALIGN(addr); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2347 | } |
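	/*
	 * Example: aligned_size(0x1234, 0x100) == 0x1000: the page offset
	 * of host_addr plus the size, rounded up to whole pages.
	 */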
| 2348 | |
| 2349 | struct iova * |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2350 | iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2351 | { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2352 | struct iova *piova; |
| 2353 | |
| 2354 | /* Make sure it's in range */ |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2355 | end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2356 | if (!size || (IOVA_START_ADDR + size > end)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2357 | return NULL; |
| 2358 | |
| 2359 | piova = alloc_iova(&domain->iovad, |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2360 | size >> PAGE_SHIFT, IOVA_PFN(end), 1); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2361 | return piova; |
| 2362 | } |
| 2363 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2364 | static struct iova * |
| 2365 | __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2366 | size_t size, u64 dma_mask) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2367 | { |
| 2368 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2369 | struct iova *iova = NULL; |
| 2370 | |
Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 2371 | if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac) |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2372 | iova = iommu_alloc_iova(domain, size, dma_mask); |
| 2373 | else { |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2374 | /* |
| 2375 | * First try to allocate an io virtual address in |
Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 2376 | * DMA_BIT_MASK(32) and if that fails then try allocating |
Joe Perches | 3609801 | 2007-12-17 11:40:11 -0800 | [diff] [blame] | 2377 | * from higher range |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2378 | */ |
Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 2379 | iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32)); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2380 | if (!iova) |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2381 | iova = iommu_alloc_iova(domain, size, dma_mask); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2382 | } |
| 2383 | |
| 2384 | if (!iova) { |
| 2385 | printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
| 2386 | return NULL; |
| 2387 | } |
| 2388 | |
| 2389 | return iova; |
| 2390 | } |
| 2391 | |
| 2392 | static struct dmar_domain * |
| 2393 | get_valid_domain_for_dev(struct pci_dev *pdev) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2394 | { |
| 2395 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2396 | int ret; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2397 | |
| 2398 | domain = get_domain_for_dev(pdev, |
| 2399 | DEFAULT_DOMAIN_ADDRESS_WIDTH); |
| 2400 | if (!domain) { |
| 2401 | printk(KERN_ERR |
| 2402 | "Allocating domain for %s failed", pci_name(pdev)); |
Al Viro | 4fe05bb | 2007-10-29 04:51:16 +0000 | [diff] [blame] | 2403 | return NULL; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2404 | } |
| 2405 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2406 | /* make sure context mapping is ok */ |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 2407 | if (unlikely(!domain_context_mapped(pdev))) { |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2408 | ret = domain_context_mapping(domain, pdev, |
| 2409 | CONTEXT_TT_MULTI_LEVEL); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2410 | if (ret) { |
| 2411 | printk(KERN_ERR |
| 2412 | "Domain context map for %s failed", |
| 2413 | pci_name(pdev)); |
Al Viro | 4fe05bb | 2007-10-29 04:51:16 +0000 | [diff] [blame] | 2414 | return NULL; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2415 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2416 | } |
| 2417 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2418 | return domain; |
| 2419 | } |
| 2420 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2421 | static int iommu_dummy(struct pci_dev *pdev) |
| 2422 | { |
| 2423 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; |
| 2424 | } |
| 2425 | |
| 2426 | /* Check if the pdev needs to go through non-identity map and unmap process.*/ |
| 2427 | static int iommu_no_mapping(struct pci_dev *pdev) |
| 2428 | { |
| 2429 | int found; |
| 2430 | |
| 2431 | if (!iommu_identity_mapping) |
| 2432 | return iommu_dummy(pdev); |
| 2433 | |
| 2434 | found = identity_mapping(pdev); |
| 2435 | if (found) { |
| 2436 | if (pdev->dma_mask > DMA_BIT_MASK(32)) |
| 2437 | return 1; |
| 2438 | else { |
| 2439 | /* |
| 2440 | * A 32-bit DMA device is removed from si_domain and falls
| 2441 | * back to non-identity mapping.
| 2442 | */ |
| 2443 | domain_remove_one_dev_info(si_domain, pdev); |
| 2444 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", |
| 2445 | pci_name(pdev)); |
| 2446 | return 0; |
| 2447 | } |
| 2448 | } else { |
| 2449 | /* |
| 2450 | * A 64-bit DMA device detached from a VM is put back into
| 2451 | * si_domain for identity mapping.
| 2452 | */ |
| 2453 | if (pdev->dma_mask > DMA_BIT_MASK(32)) { |
| 2454 | int ret; |
| 2455 | ret = domain_add_dev_info(si_domain, pdev); |
| 2456 | if (!ret) { |
| 2457 | printk(KERN_INFO "64bit %s uses identity mapping\n", |
| 2458 | pci_name(pdev)); |
| 2459 | return 1; |
| 2460 | } |
| 2461 | } |
| 2462 | } |
| 2463 | |
| 2464 | return iommu_dummy(pdev); |
| 2465 | } |
| 2466 | |
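| | /*
| |  * Map [paddr, paddr + size) for DMA: devices that bypass translation get
| |  * paddr back unchanged; otherwise allocate a page-aligned IOVA range in the
| |  * device's domain, set read/write permission from the DMA direction (and
| |  * the zero-length-read capability), install the PTEs, and flush -- a PSI
| |  * IOTLB flush under caching mode, a write-buffer flush otherwise.
| |  */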
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2467 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
| 2468 | size_t size, int dir, u64 dma_mask) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2469 | { |
| 2470 | struct pci_dev *pdev = to_pci_dev(hwdev); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2471 | struct dmar_domain *domain; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2472 | phys_addr_t start_paddr; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2473 | struct iova *iova; |
| 2474 | int prot = 0; |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2475 | int ret; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2476 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2477 | |
| 2478 | BUG_ON(dir == DMA_NONE); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2479 | |
| 2480 | if (iommu_no_mapping(pdev)) |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2481 | return paddr; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2482 | |
| 2483 | domain = get_valid_domain_for_dev(pdev); |
| 2484 | if (!domain) |
| 2485 | return 0; |
| 2486 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2487 | iommu = domain_get_iommu(domain); |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2488 | size = aligned_size((u64)paddr, size); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2489 | |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2490 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2491 | if (!iova) |
| 2492 | goto error; |
| 2493 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2494 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2495 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2496 | /* |
| 2497 | * Check if DMAR supports zero-length reads on write-only
| 2498 | * mappings.
| 2499 | */ |
| 2500 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2501 | !cap_zlr(iommu->cap)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2502 | prot |= DMA_PTE_READ; |
| 2503 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
| 2504 | prot |= DMA_PTE_WRITE; |
| 2505 | /* |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2506 | * [paddr, paddr + size) might cover partial pages, so map whole
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2507 | * pages. Note: if two parts of one page are mapped separately, we
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2508 | * might have two guest_addrs mapping to the same host paddr, but this
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2509 | * is not a big problem.
| 2510 | */ |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2511 | ret = domain_page_mapping(domain, start_paddr, |
David Woodhouse | fd18de5 | 2009-05-10 23:57:41 +0100 | [diff] [blame] | 2512 | ((u64)paddr) & PHYSICAL_PAGE_MASK, |
| 2513 | size, prot); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2514 | if (ret) |
| 2515 | goto error; |
| 2516 | |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2517 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 2518 | if (cap_caching_mode(iommu->cap)) |
| 2519 | iommu_flush_iotlb_psi(iommu, 0, start_paddr, |
| 2520 | size >> VTD_PAGE_SHIFT); |
| 2521 | else |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2522 | iommu_flush_write_buffer(iommu); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2523 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2524 | return start_paddr + ((u64)paddr & (~PAGE_MASK)); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2525 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2526 | error: |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2527 | if (iova) |
| 2528 | __free_iova(&domain->iovad, iova); |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2529 | printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2530 | pci_name(pdev), size, (unsigned long long)paddr, dir); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2531 | return 0; |
| 2532 | } |
| 2533 | |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2534 | static dma_addr_t intel_map_page(struct device *dev, struct page *page, |
| 2535 | unsigned long offset, size_t size, |
| 2536 | enum dma_data_direction dir, |
| 2537 | struct dma_attrs *attrs) |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2538 | { |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2539 | return __intel_map_single(dev, page_to_phys(page) + offset, size, |
| 2540 | dir, to_pci_dev(dev)->dma_mask); |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2541 | } |
| 2542 | |
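| | /*
| |  * Drain the deferred-unmap queues. For every IOMMU with pending entries,
| |  * do one global IOTLB flush, then flush the device IOTLB for each queued
| |  * IOVA and free it. Called with async_umap_flush_lock held.
| |  */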
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2543 | static void flush_unmaps(void) |
| 2544 | { |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2545 | int i, j; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2546 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2547 | timer_on = 0; |
| 2548 | |
| 2549 | /* just flush them all */ |
| 2550 | for (i = 0; i < g_num_of_iommus; i++) { |
Weidong Han | a2bb845 | 2008-12-08 11:24:12 +0800 | [diff] [blame] | 2551 | struct intel_iommu *iommu = g_iommus[i]; |
| 2552 | if (!iommu) |
| 2553 | continue; |
Suresh Siddha | c42d9f3 | 2008-07-10 11:16:36 -0700 | [diff] [blame] | 2554 | |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 2555 | if (!deferred_flush[i].next) |
| 2556 | continue; |
| 2557 | |
| 2558 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 2559 | DMA_TLB_GLOBAL_FLUSH); |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 2560 | for (j = 0; j < deferred_flush[i].next; j++) { |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 2561 | unsigned long mask; |
| 2562 | struct iova *iova = deferred_flush[i].iova[j]; |
| 2563 | |
| 2564 | mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT; |
| 2565 | mask = ilog2(mask >> VTD_PAGE_SHIFT); |
| 2566 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], |
| 2567 | iova->pfn_lo << PAGE_SHIFT, mask); |
| 2568 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2569 | } |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 2570 | deferred_flush[i].next = 0; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2571 | } |
| 2572 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2573 | list_size = 0; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2574 | } |
| 2575 | |
| 2576 | static void flush_unmaps_timeout(unsigned long data) |
| 2577 | { |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2578 | unsigned long flags; |
| 2579 | |
| 2580 | spin_lock_irqsave(&async_umap_flush_lock, flags); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2581 | flush_unmaps(); |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2582 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2583 | } |
| 2584 | |
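| | /*
| |  * Queue (domain, iova) on the owning IOMMU's deferred_flush list instead
| |  * of flushing immediately; the queues are drained once HIGH_WATER_MARK
| |  * entries accumulate, or after 10ms via unmap_timer.
| |  */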
| 2585 | static void add_unmap(struct dmar_domain *dom, struct iova *iova) |
| 2586 | { |
| 2587 | unsigned long flags; |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2588 | int next, iommu_id; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2589 | struct intel_iommu *iommu; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2590 | |
| 2591 | spin_lock_irqsave(&async_umap_flush_lock, flags); |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2592 | if (list_size == HIGH_WATER_MARK) |
| 2593 | flush_unmaps(); |
| 2594 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2595 | iommu = domain_get_iommu(dom); |
| 2596 | iommu_id = iommu->seq_id; |
Suresh Siddha | c42d9f3 | 2008-07-10 11:16:36 -0700 | [diff] [blame] | 2597 | |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2598 | next = deferred_flush[iommu_id].next; |
| 2599 | deferred_flush[iommu_id].domain[next] = dom; |
| 2600 | deferred_flush[iommu_id].iova[next] = iova; |
| 2601 | deferred_flush[iommu_id].next++; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2602 | |
| 2603 | if (!timer_on) { |
| 2604 | mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); |
| 2605 | timer_on = 1; |
| 2606 | } |
| 2607 | list_size++; |
| 2608 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
| 2609 | } |
| 2610 | |
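| | /*
| |  * Tear down a DMA mapping: look up the IOVA covering dev_addr, clear the
| |  * PTEs and free the page tables for the whole aligned range, then either
| |  * flush the IOTLB and free the IOVA at once (intel_iommu_strict) or defer
| |  * both via add_unmap().
| |  */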
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2611 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, |
| 2612 | size_t size, enum dma_data_direction dir, |
| 2613 | struct dma_attrs *attrs) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2614 | { |
| 2615 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2616 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2617 | unsigned long start_addr; |
| 2618 | struct iova *iova; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2619 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2620 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2621 | if (iommu_no_mapping(pdev)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2622 | return; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2623 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2624 | domain = find_domain(pdev); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2625 | BUG_ON(!domain); |
| 2626 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2627 | iommu = domain_get_iommu(domain); |
| 2628 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2629 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); |
| 2630 | if (!iova) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2631 | return; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2632 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2633 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2634 | size = aligned_size((u64)dev_addr, size); |
| 2635 | |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2636 | pr_debug("Device %s unmapping: %zx@%llx\n", |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2637 | pci_name(pdev), size, (unsigned long long)start_addr); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2638 | |
| 2639 | /* clear the whole mapped range */
| 2640 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
| 2641 | /* free page tables */ |
| 2642 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2643 | if (intel_iommu_strict) { |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2644 | iommu_flush_iotlb_psi(iommu, domain->id, start_addr, |
| 2645 | size >> VTD_PAGE_SHIFT); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2646 | /* free iova */ |
| 2647 | __free_iova(&domain->iovad, iova); |
| 2648 | } else { |
| 2649 | add_unmap(domain, iova); |
| 2650 | /* |
| 2651 | * queue up the release of the unmap to save the 1/6th of the |
| 2652 | * CPU time used by the iotlb flush operation...
| 2653 | */ |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2654 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2655 | } |
| 2656 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2657 | static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, |
| 2658 | int dir) |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2659 | { |
| 2660 | intel_unmap_page(dev, dev_addr, size, dir, NULL); |
| 2661 | } |
| 2662 | |
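| | /*
| |  * Allocate zeroed, page-aligned memory and map it DMA_BIDIRECTIONAL
| |  * against the device's coherent DMA mask. GFP_DMA/GFP_DMA32 are masked
| |  * out; addressing limits are handled by the IOVA allocator instead.
| |  */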
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2663 | static void *intel_alloc_coherent(struct device *hwdev, size_t size, |
| 2664 | dma_addr_t *dma_handle, gfp_t flags) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2665 | { |
| 2666 | void *vaddr; |
| 2667 | int order; |
| 2668 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2669 | size = PAGE_ALIGN(size); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2670 | order = get_order(size); |
| 2671 | flags &= ~(GFP_DMA | GFP_DMA32); |
| 2672 | |
| 2673 | vaddr = (void *)__get_free_pages(flags, order); |
| 2674 | if (!vaddr) |
| 2675 | return NULL; |
| 2676 | memset(vaddr, 0, size); |
| 2677 | |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2678 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, |
| 2679 | DMA_BIDIRECTIONAL, |
| 2680 | hwdev->coherent_dma_mask); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2681 | if (*dma_handle) |
| 2682 | return vaddr; |
| 2683 | free_pages((unsigned long)vaddr, order); |
| 2684 | return NULL; |
| 2685 | } |
| 2686 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2687 | static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
| 2688 | dma_addr_t dma_handle) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2689 | { |
| 2690 | int order; |
| 2691 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2692 | size = PAGE_ALIGN(size); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2693 | order = get_order(size); |
| 2694 | |
| 2695 | intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); |
| 2696 | free_pages((unsigned long)vaddr, order); |
| 2697 | } |
| 2698 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2699 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, |
| 2700 | int nelems, enum dma_data_direction dir, |
| 2701 | struct dma_attrs *attrs) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2702 | { |
| 2703 | int i; |
| 2704 | struct pci_dev *pdev = to_pci_dev(hwdev); |
| 2705 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2706 | unsigned long start_addr; |
| 2707 | struct iova *iova; |
| 2708 | size_t size = 0; |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2709 | phys_addr_t addr; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2710 | struct scatterlist *sg; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2711 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2712 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2713 | if (iommu_no_mapping(pdev)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2714 | return; |
| 2715 | |
| 2716 | domain = find_domain(pdev); |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2717 | BUG_ON(!domain); |
| 2718 | |
| 2719 | iommu = domain_get_iommu(domain); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2720 | |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2721 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2722 | if (!iova) |
| 2723 | return; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2724 | for_each_sg(sglist, sg, nelems, i) { |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2725 | addr = page_to_phys(sg_page(sg)) + sg->offset; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2726 | size += aligned_size((u64)addr, sg->length); |
| 2727 | } |
| 2728 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2729 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2730 | |
| 2731 | /* clear the whole mapped range */
| 2732 | dma_pte_clear_range(domain, start_addr, start_addr + size); |
| 2733 | /* free page tables */ |
| 2734 | dma_pte_free_pagetable(domain, start_addr, start_addr + size); |
| 2735 | |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2736 | iommu_flush_iotlb_psi(iommu, domain->id, start_addr, |
| 2737 | size >> VTD_PAGE_SHIFT); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2738 | |
| 2739 | /* free iova */ |
| 2740 | __free_iova(&domain->iovad, iova); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2741 | } |
| 2742 | |
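| | /*
| |  * Scatterlist mapping for devices that bypass the IOMMU: each entry's
| |  * dma_address is simply its physical address.
| |  */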
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2743 | static int intel_nontranslate_map_sg(struct device *hwdev,
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2744 | struct scatterlist *sglist, int nelems, int dir) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2745 | { |
| 2746 | int i; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2747 | struct scatterlist *sg; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2748 | |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2749 | for_each_sg(sglist, sg, nelems, i) { |
FUJITA Tomonori | 12d4d40 | 2007-10-23 09:32:25 +0200 | [diff] [blame] | 2750 | BUG_ON(!sg_page(sg)); |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2751 | sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2752 | sg->dma_length = sg->length; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2753 | } |
| 2754 | return nelems; |
| 2755 | } |
| 2756 | |
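| | /*
| |  * Map a scatterlist: sum the page-aligned sizes of all entries, allocate
| |  * a single IOVA block for the total, then map each entry back to back
| |  * inside it. On any mapping failure, tear down what was mapped, free the
| |  * IOVA, and return 0.
| |  */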
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2757 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
| 2758 | enum dma_data_direction dir, struct dma_attrs *attrs) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2759 | { |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2760 | phys_addr_t addr; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2761 | int i; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2762 | struct pci_dev *pdev = to_pci_dev(hwdev); |
| 2763 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2764 | size_t size = 0; |
| 2765 | int prot = 0; |
| 2766 | size_t offset = 0; |
| 2767 | struct iova *iova = NULL; |
| 2768 | int ret; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2769 | struct scatterlist *sg; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2770 | unsigned long start_addr; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2771 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2772 | |
| 2773 | BUG_ON(dir == DMA_NONE); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 2774 | if (iommu_no_mapping(pdev)) |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2775 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2776 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2777 | domain = get_valid_domain_for_dev(pdev); |
| 2778 | if (!domain) |
| 2779 | return 0; |
| 2780 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2781 | iommu = domain_get_iommu(domain); |
| 2782 | |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2783 | for_each_sg(sglist, sg, nelems, i) { |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2784 | addr = page_to_phys(sg_page(sg)) + sg->offset; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2785 | size += aligned_size((u64)addr, sg->length); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2786 | } |
| 2787 | |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2788 | iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2789 | if (!iova) { |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2790 | sglist->dma_length = 0; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2791 | return 0; |
| 2792 | } |
| 2793 | |
| 2794 | /* |
| 2795 | * Check if DMAR supports zero-length reads on write-only
| 2796 | * mappings.
| 2797 | */ |
| 2798 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2799 | !cap_zlr(iommu->cap)) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2800 | prot |= DMA_PTE_READ; |
| 2801 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
| 2802 | prot |= DMA_PTE_WRITE; |
| 2803 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2804 | start_addr = iova->pfn_lo << PAGE_SHIFT; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2805 | offset = 0; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2806 | for_each_sg(sglist, sg, nelems, i) { |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2807 | addr = page_to_phys(sg_page(sg)) + sg->offset; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2808 | size = aligned_size((u64)addr, sg->length); |
| 2809 | ret = domain_page_mapping(domain, start_addr + offset, |
David Woodhouse | fd18de5 | 2009-05-10 23:57:41 +0100 | [diff] [blame] | 2810 | ((u64)addr) & PHYSICAL_PAGE_MASK, |
| 2811 | size, prot); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2812 | if (ret) { |
| 2813 | /* clear what was mapped so far */
| 2814 | dma_pte_clear_range(domain, start_addr, |
| 2815 | start_addr + offset); |
| 2816 | /* free page tables */ |
| 2817 | dma_pte_free_pagetable(domain, start_addr, |
| 2818 | start_addr + offset); |
| 2819 | /* free iova */ |
| 2820 | __free_iova(&domain->iovad, iova); |
| 2821 | return 0; |
| 2822 | } |
| 2823 | sg->dma_address = start_addr + offset + |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2824 | ((u64)addr & (~PAGE_MASK)); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2825 | sg->dma_length = sg->length; |
| 2826 | offset += size; |
| 2827 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2828 | |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2829 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 2830 | if (cap_caching_mode(iommu->cap)) |
| 2831 | iommu_flush_iotlb_psi(iommu, 0, start_addr, |
| 2832 | offset >> VTD_PAGE_SHIFT); |
| 2833 | else |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2834 | iommu_flush_write_buffer(iommu); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2835 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2836 | return nelems; |
| 2837 | } |
| 2838 | |
FUJITA Tomonori | dfb805e | 2009-01-28 21:53:17 +0900 | [diff] [blame] | 2839 | static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) |
| 2840 | { |
| 2841 | return !dma_addr; |
| 2842 | } |
| 2843 | |
FUJITA Tomonori | 160c1d8 | 2009-01-05 23:59:02 +0900 | [diff] [blame] | 2844 | struct dma_map_ops intel_dma_ops = { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2845 | .alloc_coherent = intel_alloc_coherent, |
| 2846 | .free_coherent = intel_free_coherent, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2847 | .map_sg = intel_map_sg, |
| 2848 | .unmap_sg = intel_unmap_sg, |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2849 | .map_page = intel_map_page, |
| 2850 | .unmap_page = intel_unmap_page, |
FUJITA Tomonori | dfb805e | 2009-01-28 21:53:17 +0900 | [diff] [blame] | 2851 | .mapping_error = intel_mapping_error, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2852 | }; |
| 2853 | |
| 2854 | static inline int iommu_domain_cache_init(void) |
| 2855 | { |
| 2856 | int ret = 0; |
| 2857 | |
| 2858 | iommu_domain_cache = kmem_cache_create("iommu_domain", |
| 2859 | sizeof(struct dmar_domain), |
| 2860 | 0, |
| 2861 | SLAB_HWCACHE_ALIGN, |
| 2863 | NULL);
| 2864 | if (!iommu_domain_cache) { |
| 2865 | printk(KERN_ERR "Couldn't create iommu_domain cache\n"); |
| 2866 | ret = -ENOMEM; |
| 2867 | } |
| 2868 | |
| 2869 | return ret; |
| 2870 | } |
| 2871 | |
| 2872 | static inline int iommu_devinfo_cache_init(void) |
| 2873 | { |
| 2874 | int ret = 0; |
| 2875 | |
| 2876 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", |
| 2877 | sizeof(struct device_domain_info), |
| 2878 | 0, |
| 2879 | SLAB_HWCACHE_ALIGN, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2880 | NULL); |
| 2881 | if (!iommu_devinfo_cache) { |
| 2882 | printk(KERN_ERR "Couldn't create devinfo cache\n"); |
| 2883 | ret = -ENOMEM; |
| 2884 | } |
| 2885 | |
| 2886 | return ret; |
| 2887 | } |
| 2888 | |
| 2889 | static inline int iommu_iova_cache_init(void) |
| 2890 | { |
| 2891 | int ret = 0; |
| 2892 | |
| 2893 | iommu_iova_cache = kmem_cache_create("iommu_iova", |
| 2894 | sizeof(struct iova), |
| 2895 | 0, |
| 2896 | SLAB_HWCACHE_ALIGN, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2897 | NULL); |
| 2898 | if (!iommu_iova_cache) { |
| 2899 | printk(KERN_ERR "Couldn't create iova cache\n"); |
| 2900 | ret = -ENOMEM; |
| 2901 | } |
| 2902 | |
| 2903 | return ret; |
| 2904 | } |
| 2905 | |
| 2906 | static int __init iommu_init_mempool(void) |
| 2907 | { |
| 2908 | int ret; |
| 2909 | ret = iommu_iova_cache_init(); |
| 2910 | if (ret) |
| 2911 | return ret; |
| 2912 | |
| 2913 | ret = iommu_domain_cache_init(); |
| 2914 | if (ret) |
| 2915 | goto domain_error; |
| 2916 | |
| 2917 | ret = iommu_devinfo_cache_init(); |
| 2918 | if (!ret) |
| 2919 | return ret; |
| 2920 | |
| 2921 | kmem_cache_destroy(iommu_domain_cache); |
| 2922 | domain_error: |
| 2923 | kmem_cache_destroy(iommu_iova_cache); |
| 2924 | |
| 2925 | return -ENOMEM; |
| 2926 | } |
| 2927 | |
| 2928 | static void __init iommu_exit_mempool(void) |
| 2929 | { |
| 2930 | kmem_cache_destroy(iommu_devinfo_cache); |
| 2931 | kmem_cache_destroy(iommu_domain_cache); |
| 2932 | kmem_cache_destroy(iommu_iova_cache); |
| 2933 | |
| 2934 | } |
| 2935 | |
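| | /*
| |  * Mark DRHD units that control no PCI devices as ignored. If gfx mapping
| |  * is disabled, also ignore units that control only graphics devices and
| |  * tag those devices with DUMMY_DEVICE_DOMAIN_INFO so the DMA API bypasses
| |  * them.
| |  */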
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2936 | static void __init init_no_remapping_devices(void) |
| 2937 | { |
| 2938 | struct dmar_drhd_unit *drhd; |
| 2939 | |
| 2940 | for_each_drhd_unit(drhd) { |
| 2941 | if (!drhd->include_all) { |
| 2942 | int i; |
| 2943 | for (i = 0; i < drhd->devices_cnt; i++) |
| 2944 | if (drhd->devices[i] != NULL) |
| 2945 | break; |
| 2946 | /* ignore DMAR unit if no PCI devices exist */
| 2947 | if (i == drhd->devices_cnt) |
| 2948 | drhd->ignored = 1; |
| 2949 | } |
| 2950 | } |
| 2951 | |
| 2952 | if (dmar_map_gfx) |
| 2953 | return; |
| 2954 | |
| 2955 | for_each_drhd_unit(drhd) { |
| 2956 | int i; |
| 2957 | if (drhd->ignored || drhd->include_all) |
| 2958 | continue; |
| 2959 | |
| 2960 | for (i = 0; i < drhd->devices_cnt; i++) |
| 2961 | if (drhd->devices[i] && |
| 2962 | !IS_GFX_DEVICE(drhd->devices[i])) |
| 2963 | break; |
| 2964 | |
| 2965 | if (i < drhd->devices_cnt) |
| 2966 | continue; |
| 2967 | |
| 2968 | /* bypass IOMMU if it is just for gfx devices */ |
| 2969 | drhd->ignored = 1; |
| 2970 | for (i = 0; i < drhd->devices_cnt; i++) { |
| 2971 | if (!drhd->devices[i]) |
| 2972 | continue; |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 2973 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2974 | } |
| 2975 | } |
| 2976 | } |
| 2977 | |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 2978 | #ifdef CONFIG_SUSPEND |
| 2979 | static int init_iommu_hw(void) |
| 2980 | { |
| 2981 | struct dmar_drhd_unit *drhd; |
| 2982 | struct intel_iommu *iommu = NULL; |
| 2983 | |
| 2984 | for_each_active_iommu(iommu, drhd) |
| 2985 | if (iommu->qi) |
| 2986 | dmar_reenable_qi(iommu); |
| 2987 | |
| 2988 | for_each_active_iommu(iommu, drhd) { |
| 2989 | iommu_flush_write_buffer(iommu); |
| 2990 | |
| 2991 | iommu_set_root_entry(iommu); |
| 2992 | |
| 2993 | iommu->flush.flush_context(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2994 | DMA_CCMD_GLOBAL_INVL); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 2995 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2996 | DMA_TLB_GLOBAL_FLUSH); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 2997 | iommu_disable_protect_mem_regions(iommu); |
| 2998 | iommu_enable_translation(iommu); |
| 2999 | } |
| 3000 | |
| 3001 | return 0; |
| 3002 | } |
| 3003 | |
| 3004 | static void iommu_flush_all(void) |
| 3005 | { |
| 3006 | struct dmar_drhd_unit *drhd; |
| 3007 | struct intel_iommu *iommu; |
| 3008 | |
| 3009 | for_each_active_iommu(iommu, drhd) { |
| 3010 | iommu->flush.flush_context(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 3011 | DMA_CCMD_GLOBAL_INVL); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3012 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 3013 | DMA_TLB_GLOBAL_FLUSH); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3014 | } |
| 3015 | } |
| 3016 | |
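| | /*
| |  * Suspend: flush all caches, disable translation, then save each IOMMU's
| |  * fault-event control/data/address registers under register_lock;
| |  * iommu_resume() restores them after re-enabling the hardware via
| |  * init_iommu_hw().
| |  */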
| 3017 | static int iommu_suspend(struct sys_device *dev, pm_message_t state) |
| 3018 | { |
| 3019 | struct dmar_drhd_unit *drhd; |
| 3020 | struct intel_iommu *iommu = NULL; |
| 3021 | unsigned long flag; |
| 3022 | |
| 3023 | for_each_active_iommu(iommu, drhd) { |
| 3024 | iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, |
| 3025 | GFP_ATOMIC); |
| 3026 | if (!iommu->iommu_state) |
| 3027 | goto nomem; |
| 3028 | } |
| 3029 | |
| 3030 | iommu_flush_all(); |
| 3031 | |
| 3032 | for_each_active_iommu(iommu, drhd) { |
| 3033 | iommu_disable_translation(iommu); |
| 3034 | |
| 3035 | spin_lock_irqsave(&iommu->register_lock, flag); |
| 3036 | |
| 3037 | iommu->iommu_state[SR_DMAR_FECTL_REG] = |
| 3038 | readl(iommu->reg + DMAR_FECTL_REG); |
| 3039 | iommu->iommu_state[SR_DMAR_FEDATA_REG] = |
| 3040 | readl(iommu->reg + DMAR_FEDATA_REG); |
| 3041 | iommu->iommu_state[SR_DMAR_FEADDR_REG] = |
| 3042 | readl(iommu->reg + DMAR_FEADDR_REG); |
| 3043 | iommu->iommu_state[SR_DMAR_FEUADDR_REG] = |
| 3044 | readl(iommu->reg + DMAR_FEUADDR_REG); |
| 3045 | |
| 3046 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 3047 | } |
| 3048 | return 0; |
| 3049 | |
| 3050 | nomem: |
| 3051 | for_each_active_iommu(iommu, drhd) |
| 3052 | kfree(iommu->iommu_state); |
| 3053 | |
| 3054 | return -ENOMEM; |
| 3055 | } |
| 3056 | |
| 3057 | static int iommu_resume(struct sys_device *dev) |
| 3058 | { |
| 3059 | struct dmar_drhd_unit *drhd; |
| 3060 | struct intel_iommu *iommu = NULL; |
| 3061 | unsigned long flag; |
| 3062 | |
| 3063 | if (init_iommu_hw()) { |
| 3064 | WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
| 3065 | return -EIO; |
| 3066 | } |
| 3067 | |
| 3068 | for_each_active_iommu(iommu, drhd) { |
| 3069 | |
| 3070 | spin_lock_irqsave(&iommu->register_lock, flag); |
| 3071 | |
| 3072 | writel(iommu->iommu_state[SR_DMAR_FECTL_REG], |
| 3073 | iommu->reg + DMAR_FECTL_REG); |
| 3074 | writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], |
| 3075 | iommu->reg + DMAR_FEDATA_REG); |
| 3076 | writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], |
| 3077 | iommu->reg + DMAR_FEADDR_REG); |
| 3078 | writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], |
| 3079 | iommu->reg + DMAR_FEUADDR_REG); |
| 3080 | |
| 3081 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 3082 | } |
| 3083 | |
| 3084 | for_each_active_iommu(iommu, drhd) |
| 3085 | kfree(iommu->iommu_state); |
| 3086 | |
| 3087 | return 0; |
| 3088 | } |
| 3089 | |
| 3090 | static struct sysdev_class iommu_sysclass = { |
| 3091 | .name = "iommu", |
| 3092 | .resume = iommu_resume, |
| 3093 | .suspend = iommu_suspend, |
| 3094 | }; |
| 3095 | |
| 3096 | static struct sys_device device_iommu = { |
| 3097 | .cls = &iommu_sysclass, |
| 3098 | }; |
| 3099 | |
| 3100 | static int __init init_iommu_sysfs(void) |
| 3101 | { |
| 3102 | int error; |
| 3103 | |
| 3104 | error = sysdev_class_register(&iommu_sysclass); |
| 3105 | if (error) |
| 3106 | return error; |
| 3107 | |
| 3108 | error = sysdev_register(&device_iommu); |
| 3109 | if (error) |
| 3110 | sysdev_class_unregister(&iommu_sysclass); |
| 3111 | |
| 3112 | return error; |
| 3113 | } |
| 3114 | |
| 3115 | #else |
| 3116 | static int __init init_iommu_sysfs(void) |
| 3117 | { |
| 3118 | return 0; |
| 3119 | } |
| 3120 | #endif /* CONFIG_SUSPEND */
| 3121 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3122 | int __init intel_iommu_init(void) |
| 3123 | { |
| 3124 | int ret = 0; |
| 3125 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3126 | if (dmar_table_init()) |
| 3127 | return -ENODEV; |
| 3128 | |
Suresh Siddha | 1886e8a | 2008-07-10 11:16:37 -0700 | [diff] [blame] | 3129 | if (dmar_dev_scope_init()) |
| 3130 | return -ENODEV; |
| 3131 | |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 3132 | /* |
| 3133 | * Check the need for DMA-remapping initialization now. |
| 3134 | * The initialization above is also used by interrupt remapping.
| 3135 | */ |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 3136 | if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled) |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 3137 | return -ENODEV; |
| 3138 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3139 | iommu_init_mempool(); |
| 3140 | dmar_init_reserved_ranges(); |
| 3141 | |
| 3142 | init_no_remapping_devices(); |
| 3143 | |
| 3144 | ret = init_dmars(); |
| 3145 | if (ret) { |
| 3146 | printk(KERN_ERR "IOMMU: dmar init failed\n"); |
| 3147 | put_iova_domain(&reserved_iova_list); |
| 3148 | iommu_exit_mempool(); |
| 3149 | return ret; |
| 3150 | } |
| 3151 | printk(KERN_INFO |
| 3152 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); |
| 3153 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 3154 | init_timer(&unmap_timer); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3155 | force_iommu = 1; |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 3156 | |
| 3157 | if (!iommu_pass_through) { |
| 3158 | printk(KERN_INFO |
| 3159 | "Multi-level page-table translation for DMAR.\n"); |
| 3160 | dma_ops = &intel_dma_ops; |
| 3161 | } else |
| 3162 | printk(KERN_INFO |
| 3163 | "DMAR: Pass through translation for DMAR.\n"); |
| 3164 | |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3165 | init_iommu_sysfs(); |
Joerg Roedel | a8bcbb0d | 2008-12-03 15:14:02 +0100 | [diff] [blame] | 3166 | |
| 3167 | register_iommu(&intel_iommu_ops); |
| 3168 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3169 | return 0; |
| 3170 | } |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 3171 | |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3172 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
| 3173 | struct pci_dev *pdev) |
| 3174 | { |
| 3175 | struct pci_dev *tmp, *parent; |
| 3176 | |
| 3177 | if (!iommu || !pdev) |
| 3178 | return; |
| 3179 | |
| 3180 | /* dependent device detach */ |
| 3181 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 3182 | /* Secondary interface's bus number and devfn 0 */ |
| 3183 | if (tmp) { |
| 3184 | parent = pdev->bus->self; |
| 3185 | while (parent != tmp) { |
| 3186 | iommu_detach_dev(iommu, parent->bus->number, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3187 | parent->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3188 | parent = parent->bus->self; |
| 3189 | } |
| 3190 | if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ |
| 3191 | iommu_detach_dev(iommu, |
| 3192 | tmp->subordinate->number, 0); |
| 3193 | else /* this is a legacy PCI bridge */ |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3194 | iommu_detach_dev(iommu, tmp->bus->number, |
| 3195 | tmp->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3196 | } |
| 3197 | } |
| 3198 | |
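| | /*
| |  * Detach one device from a domain: unlink its device_domain_info, disable
| |  * its device IOTLB, clear its context entry (and those of any upstream
| |  * bridges), and, if no other device behind the same IOMMU remains in the
| |  * domain, drop that IOMMU from the domain's bitmap and recompute the
| |  * domain's capabilities.
| |  */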
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 3199 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3200 | struct pci_dev *pdev) |
| 3201 | { |
| 3202 | struct device_domain_info *info; |
| 3203 | struct intel_iommu *iommu; |
| 3204 | unsigned long flags; |
| 3205 | int found = 0; |
| 3206 | struct list_head *entry, *tmp; |
| 3207 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3208 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, |
| 3209 | pdev->devfn); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3210 | if (!iommu) |
| 3211 | return; |
| 3212 | |
| 3213 | spin_lock_irqsave(&device_domain_lock, flags); |
| 3214 | list_for_each_safe(entry, tmp, &domain->devices) { |
| 3215 | info = list_entry(entry, struct device_domain_info, link); |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3216 | /* No need to compare PCI domain; it has to be the same */ |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3217 | if (info->bus == pdev->bus->number && |
| 3218 | info->devfn == pdev->devfn) { |
| 3219 | list_del(&info->link); |
| 3220 | list_del(&info->global); |
| 3221 | if (info->dev) |
| 3222 | info->dev->dev.archdata.iommu = NULL; |
| 3223 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 3224 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 3225 | iommu_disable_dev_iotlb(info); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3226 | iommu_detach_dev(iommu, info->bus, info->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3227 | iommu_detach_dependent_devices(iommu, pdev); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3228 | free_devinfo_mem(info); |
| 3229 | |
| 3230 | spin_lock_irqsave(&device_domain_lock, flags); |
| 3231 | |
| 3232 | if (found) |
| 3233 | break; |
| 3234 | else |
| 3235 | continue; |
| 3236 | } |
| 3237 | |
| 3238 | /* If there are no other devices under the same iommu
| 3239 | * owned by this domain, clear this iommu in iommu_bmp and
| 3240 | * update the iommu count and coherency.
| 3241 | */ |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3242 | if (iommu == device_to_iommu(info->segment, info->bus, |
| 3243 | info->devfn)) |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3244 | found = 1; |
| 3245 | } |
| 3246 | |
| 3247 | if (found == 0) { |
| 3248 | unsigned long tmp_flags; |
| 3249 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); |
| 3250 | clear_bit(iommu->seq_id, &domain->iommu_bmp); |
| 3251 | domain->iommu_count--; |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 3252 | domain_update_iommu_cap(domain); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3253 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); |
| 3254 | } |
| 3255 | |
| 3256 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 3257 | } |
| 3258 | |
| 3259 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) |
| 3260 | { |
| 3261 | struct device_domain_info *info; |
| 3262 | struct intel_iommu *iommu; |
| 3263 | unsigned long flags1, flags2; |
| 3264 | |
| 3265 | spin_lock_irqsave(&device_domain_lock, flags1); |
| 3266 | while (!list_empty(&domain->devices)) { |
| 3267 | info = list_entry(domain->devices.next, |
| 3268 | struct device_domain_info, link); |
| 3269 | list_del(&info->link); |
| 3270 | list_del(&info->global); |
| 3271 | if (info->dev) |
| 3272 | info->dev->dev.archdata.iommu = NULL; |
| 3273 | |
| 3274 | spin_unlock_irqrestore(&device_domain_lock, flags1); |
| 3275 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 3276 | iommu_disable_dev_iotlb(info); |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3277 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3278 | iommu_detach_dev(iommu, info->bus, info->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3279 | iommu_detach_dependent_devices(iommu, info->dev); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3280 | |
| 3281 | /* clear this iommu in iommu_bmp, update iommu count |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 3282 | * and capabilities |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3283 | */ |
| 3284 | spin_lock_irqsave(&domain->iommu_lock, flags2); |
| 3285 | if (test_and_clear_bit(iommu->seq_id, |
| 3286 | &domain->iommu_bmp)) { |
| 3287 | domain->iommu_count--; |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 3288 | domain_update_iommu_cap(domain); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3289 | } |
| 3290 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); |
| 3291 | |
| 3292 | free_devinfo_mem(info); |
| 3293 | spin_lock_irqsave(&device_domain_lock, flags1); |
| 3294 | } |
| 3295 | spin_unlock_irqrestore(&device_domain_lock, flags1); |
| 3296 | } |
| 3297 | |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3298 | /* domain ids for virtual machines; these are never set in context entries */
| 3299 | static unsigned long vm_domid; |
| 3300 | |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3301 | static int vm_domain_min_agaw(struct dmar_domain *domain) |
| 3302 | { |
| 3303 | int i; |
| 3304 | int min_agaw = domain->agaw; |
| 3305 | |
| 3306 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); |
| 3307 | for (; i < g_num_of_iommus; ) { |
| 3308 | if (min_agaw > g_iommus[i]->agaw) |
| 3309 | min_agaw = g_iommus[i]->agaw; |
| 3310 | |
| 3311 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); |
| 3312 | } |
| 3313 | |
| 3314 | return min_agaw; |
| 3315 | } |
| 3316 | |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3317 | static struct dmar_domain *iommu_alloc_vm_domain(void) |
| 3318 | { |
| 3319 | struct dmar_domain *domain; |
| 3320 | |
| 3321 | domain = alloc_domain_mem(); |
| 3322 | if (!domain) |
| 3323 | return NULL; |
| 3324 | |
| 3325 | domain->id = vm_domid++; |
| 3326 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
| 3327 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; |
| 3328 | |
| 3329 | return domain; |
| 3330 | } |
| 3331 | |
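| | /*
| |  * Initialize a domain managed through the generic IOMMU API (e.g. for a
| |  * virtual machine): set up its IOVA space, locks, and reserved ranges,
| |  * derive agaw from the requested guest width, and allocate the top-level
| |  * page directory.
| |  */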
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 3332 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3333 | { |
| 3334 | int adjust_width; |
| 3335 | |
| 3336 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
| 3337 | spin_lock_init(&domain->mapping_lock); |
| 3338 | spin_lock_init(&domain->iommu_lock); |
| 3339 | |
| 3340 | domain_reserve_special_ranges(domain); |
| 3341 | |
| 3342 | /* calculate AGAW */ |
| 3343 | domain->gaw = guest_width; |
| 3344 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
| 3345 | domain->agaw = width_to_agaw(adjust_width); |
| 3346 | |
| 3347 | INIT_LIST_HEAD(&domain->devices); |
| 3348 | |
| 3349 | domain->iommu_count = 0; |
| 3350 | domain->iommu_coherency = 0; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3351 | domain->max_addr = 0; |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3352 | |
| 3353 | /* always allocate the top pgd */ |
| 3354 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
| 3355 | if (!domain->pgd) |
| 3356 | return -ENOMEM; |
| 3357 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); |
| 3358 | return 0; |
| 3359 | } |
| 3360 | |
| 3361 | static void iommu_free_vm_domain(struct dmar_domain *domain) |
| 3362 | { |
| 3363 | unsigned long flags; |
| 3364 | struct dmar_drhd_unit *drhd; |
| 3365 | struct intel_iommu *iommu; |
| 3366 | unsigned long i; |
| 3367 | unsigned long ndomains; |
| 3368 | |
| 3369 | for_each_drhd_unit(drhd) { |
| 3370 | if (drhd->ignored) |
| 3371 | continue; |
| 3372 | iommu = drhd->iommu; |
| 3373 | |
| 3374 | ndomains = cap_ndoms(iommu->cap); |
| 3375 | i = find_first_bit(iommu->domain_ids, ndomains); |
| 3376 | for (; i < ndomains; ) { |
| 3377 | if (iommu->domains[i] == domain) { |
| 3378 | spin_lock_irqsave(&iommu->lock, flags); |
| 3379 | clear_bit(i, iommu->domain_ids); |
| 3380 | iommu->domains[i] = NULL; |
| 3381 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 3382 | break; |
| 3383 | } |
| 3384 | i = find_next_bit(iommu->domain_ids, ndomains, i+1); |
| 3385 | } |
| 3386 | } |
| 3387 | } |
| 3388 | |
| 3389 | static void vm_domain_exit(struct dmar_domain *domain) |
| 3390 | { |
| 3391 | u64 end; |
| 3392 | |
| 3393 | /* Domain 0 is reserved, so don't process it */
| 3394 | if (!domain) |
| 3395 | return; |
| 3396 | |
| 3397 | vm_domain_remove_all_dev_info(domain); |
| 3398 | /* destroy iovas */ |
| 3399 | put_iova_domain(&domain->iovad); |
| 3400 | end = DOMAIN_MAX_ADDR(domain->gaw); |
| 3401 | end = end & (~VTD_PAGE_MASK); |
| 3402 | |
| 3403 | /* clear ptes */ |
| 3404 | dma_pte_clear_range(domain, 0, end); |
| 3405 | |
| 3406 | /* free page tables */ |
| 3407 | dma_pte_free_pagetable(domain, 0, end); |
| 3408 | |
| 3409 | iommu_free_vm_domain(domain); |
| 3410 | free_domain_mem(domain); |
| 3411 | } |
| 3412 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3413 | static int intel_iommu_domain_init(struct iommu_domain *domain) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3414 | { |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3415 | struct dmar_domain *dmar_domain; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3416 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3417 | dmar_domain = iommu_alloc_vm_domain(); |
| 3418 | if (!dmar_domain) { |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3419 | printk(KERN_ERR |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3420 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
| 3421 | return -ENOMEM; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3422 | } |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame^] | 3423 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3424 | printk(KERN_ERR |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3425 | "intel_iommu_domain_init() failed\n"); |
| 3426 | vm_domain_exit(dmar_domain); |
| 3427 | return -ENOMEM; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3428 | } |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3429 | domain->priv = dmar_domain; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3430 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3431 | return 0; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3432 | } |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3433 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3434 | static void intel_iommu_domain_destroy(struct iommu_domain *domain) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3435 | { |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3436 | struct dmar_domain *dmar_domain = domain->priv; |
| 3437 | |
| 3438 | domain->priv = NULL; |
| 3439 | vm_domain_exit(dmar_domain); |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3440 | } |

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}
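
/*
 * Illustrative sketch (compiled out): attaching through the generic API,
 * which forwards to ->attach_dev above.  Worked numbers for the agaw check:
 * with 4-level page tables (agaw 2), agaw_to_width() returns 48, so
 * DOMAIN_MAX_ADDR(48) == 0xffffffffffff and, aligned down by VTD_PAGE_MASK,
 * end == 0xfffffffff000.  "example_attach" is hypothetical.
 */
#if 0
static int example_attach(struct iommu_domain *dom, struct pci_dev *pdev)
{
	/* forwards to intel_iommu_attach_device() */
	return iommu_attach_device(dom, &pdev->dev);
}
#endif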

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3498 | |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3499 | static int intel_iommu_map_range(struct iommu_domain *domain, |
| 3500 | unsigned long iova, phys_addr_t hpa, |
| 3501 | size_t size, int iommu_prot) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3502 | { |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3503 | struct dmar_domain *dmar_domain = domain->priv; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3504 | u64 max_addr; |
| 3505 | int addr_width; |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3506 | int prot = 0; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3507 | int ret; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3508 | |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3509 | if (iommu_prot & IOMMU_READ) |
| 3510 | prot |= DMA_PTE_READ; |
| 3511 | if (iommu_prot & IOMMU_WRITE) |
| 3512 | prot |= DMA_PTE_WRITE; |
Sheng Yang | 9cf0669 | 2009-03-18 15:33:07 +0800 | [diff] [blame] | 3513 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) |
| 3514 | prot |= DMA_PTE_SNP; |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3515 | |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3516 | max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3517 | if (dmar_domain->max_addr < max_addr) { |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3518 | int min_agaw; |
| 3519 | u64 end; |
| 3520 | |
| 3521 | /* check if minimum agaw is sufficient for mapped address */ |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3522 | min_agaw = vm_domain_min_agaw(dmar_domain); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3523 | addr_width = agaw_to_width(min_agaw); |
| 3524 | end = DOMAIN_MAX_ADDR(addr_width); |
| 3525 | end = end & VTD_PAGE_MASK; |
| 3526 | if (end < max_addr) { |
| 3527 | printk(KERN_ERR "%s: iommu agaw (%d) is not " |
| 3528 | "sufficient for the mapped address (%llx)\n", |
| 3529 | __func__, min_agaw, max_addr); |
| 3530 | return -EFAULT; |
| 3531 | } |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3532 | dmar_domain->max_addr = max_addr; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3533 | } |
| 3534 | |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3535 | ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3536 | return ret; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3537 | } |
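
/*
 * Illustrative sketch (compiled out): mapping one page through the generic
 * API.  IOMMU_READ/IOMMU_WRITE are translated to DMA_PTE_READ/DMA_PTE_WRITE
 * above, and IOMMU_CACHE only takes effect when the domain supports snoop
 * control.  "example_map_one_page" is hypothetical.
 */
#if 0
static int example_map_one_page(struct iommu_domain *dom,
				unsigned long iova, phys_addr_t hpa)
{
	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;

	/* forwards to intel_iommu_map_range() */
	return iommu_map_range(dom, iova, hpa, VTD_PAGE_SIZE, prot);
}
#endif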
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3538 | |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3539 | static void intel_iommu_unmap_range(struct iommu_domain *domain, |
| 3540 | unsigned long iova, size_t size) |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3541 | { |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3542 | struct dmar_domain *dmar_domain = domain->priv; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3543 | dma_addr_t base; |
| 3544 | |
| 3545 | /* The address might not be aligned */ |
| 3546 | base = iova & VTD_PAGE_MASK; |
| 3547 | size = VTD_PAGE_ALIGN(size); |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3548 | dma_pte_clear_range(dmar_domain, base, base + size); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3549 | |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3550 | if (dmar_domain->max_addr == base + size) |
| 3551 | dmar_domain->max_addr = base; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3552 | } |

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
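
/*
 * Note: dma_pte_addr() returns the page-aligned address stored in the leaf
 * PTE, so the low VTD_PAGE_SHIFT bits of 'iova' are not folded back in.
 * Illustrative sketch (compiled out) of a caller re-applying the offset;
 * "example_iova_to_phys" is hypothetical.
 */
#if 0
static phys_addr_t example_iova_to_phys(struct iommu_domain *dom,
					unsigned long iova)
{
	phys_addr_t base = iommu_iova_to_phys(dom, iova & VTD_PAGE_MASK);

	if (!base)
		return 0;
	return base | (iova & ~VTD_PAGE_MASK);
}
#endif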

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
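
/*
 * Illustrative sketch (compiled out): end-to-end use of the ops table above
 * through the generic layer, in the style of KVM device assignment.  Every
 * "example_" name is hypothetical; error handling is abbreviated.
 */
#if 0
static int example_assign_device(struct pci_dev *pdev, unsigned long iova,
				 phys_addr_t hpa, size_t size)
{
	struct iommu_domain *dom;
	int prot = IOMMU_READ | IOMMU_WRITE;
	int ret;

	dom = iommu_domain_alloc();			/* ->domain_init */
	if (!dom)
		return -ENOMEM;

	if (iommu_domain_has_cap(dom, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;			/* ->domain_has_cap */

	ret = iommu_attach_device(dom, &pdev->dev);	/* ->attach_dev */
	if (ret)
		goto out_free;

	ret = iommu_map_range(dom, iova, hpa, size, prot);	/* ->map */
	if (ret)
		goto out_detach;
	return 0;

out_detach:
	iommu_detach_device(dom, &pdev->dev);		/* ->detach_dev */
out_free:
	iommu_domain_free(dom);				/* ->domain_destroy */
	return ret;
}
#endif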

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
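
/*
 * Illustrative sketch (compiled out): further affected chipsets would be
 * quirked with one DECLARE_PCI_FIXUP_HEADER line per device ID, as above
 * for 0x2a40 (Mobile 4 Series MCH).  The ID below only demonstrates the
 * pattern and is not asserted to need the quirk.
 */
#if 0
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
#endif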