/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))


/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device across
 * iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

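/*
 * Deferred-unmap bookkeeping: freed IOVAs are queued here and their IOTLB
 * entries are flushed in batches, either when a table fills up or from the
 * unmap_timer above.
 */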
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and fall
 * back to a smaller supported agaw for iommus that don't support the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

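/* A domain's coherency is the AND of the coherency of every iommu it uses. */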
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

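/*
 * Find the DRHD unit (iommu) covering the given PCI segment/bus/devfn: either
 * an explicitly listed device, a device behind a listed bridge, or an
 * include-all unit.
 */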
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

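/*
 * Walk the page table for @pfn, allocating intermediate levels as needed,
 * and return the last-level PTE.
 */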
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* return the pte for a given pfn at a specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

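/* Program the root entry table address into the hardware and wait for it to latch. */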
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

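/*
 * Return the device_domain_info for bus/devfn if both the device and its
 * iommu support Device-IOTLB (ATS); otherwise return NULL.
 */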
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

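/*
 * Flush the IOTLB for @pages pages starting at @pfn in domain @did, using a
 * page-selective invalidation when the hardware supports it and falling back
 * to a domain-selective flush otherwise.
 */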
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1082 | static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, |
David Woodhouse | 03d6a24 | 2009-06-28 15:33:46 +0100 | [diff] [blame] | 1083 | unsigned long pfn, unsigned int pages) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1084 | { |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 1085 | unsigned int mask = ilog2(__roundup_pow_of_two(pages)); |
David Woodhouse | 03d6a24 | 2009-06-28 15:33:46 +0100 | [diff] [blame] | 1086 | uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1087 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1088 | BUG_ON(pages == 0); |
| 1089 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1090 | /* |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 1091 |  * Fall back to a domain-selective flush if PSI is not supported or the
| 1092 |  * size is too big.
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1093 |  * PSI requires the page size to be 2 ^ x, and the base address to be
| 1094 |  * naturally aligned to that size.
| 1095 | */ |
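	/*
	 * Illustrative example: pages = 9 rounds up to 16, so mask = 4 and the
	 * PSI flush covers the naturally aligned 16-page region containing
	 * addr; if mask exceeded cap_max_amask_val() we would take the
	 * domain-selective branch below instead.
	 */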
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 1096 | if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) |
| 1097 | iommu->flush.flush_iotlb(iommu, did, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1098 | DMA_TLB_DSI_FLUSH); |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 1099 | else |
| 1100 | iommu->flush.flush_iotlb(iommu, did, addr, mask, |
| 1101 | DMA_TLB_PSI_FLUSH); |
Yu Zhao | bf92df3 | 2009-06-29 11:31:45 +0800 | [diff] [blame] | 1102 | |
| 1103 | /* |
| 1104 | * In caching mode, domain ID 0 is reserved for non-present to present |
| 1105 | * mapping flush. Device IOTLB doesn't need to be flushed in this case. |
| 1106 | */ |
| 1107 | if (!cap_caching_mode(iommu->cap) || did) |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1108 | iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1109 | } |
| 1110 | |
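/*
 * Protected memory regions (typically programmed by firmware) block DMA to
 * selected ranges. Clearing the EPM bit in the PMEN register disables them
 * so that DMA remapping alone governs access; we then poll PRS until the
 * hardware confirms the regions are no longer enforced.
 */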
mark gross | f8bab73 | 2008-02-08 04:18:38 -0800 | [diff] [blame] | 1111 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) |
| 1112 | { |
| 1113 | u32 pmen; |
| 1114 | unsigned long flags; |
| 1115 | |
| 1116 | spin_lock_irqsave(&iommu->register_lock, flags); |
| 1117 | pmen = readl(iommu->reg + DMAR_PMEN_REG); |
| 1118 | pmen &= ~DMA_PMEN_EPM; |
| 1119 | writel(pmen, iommu->reg + DMAR_PMEN_REG); |
| 1120 | |
| 1121 | /* wait for the protected region status bit to clear */ |
| 1122 | IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, |
| 1123 | readl, !(pmen & DMA_PMEN_PRS), pmen); |
| 1124 | |
| 1125 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 1126 | } |
| 1127 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1128 | static int iommu_enable_translation(struct intel_iommu *iommu) |
| 1129 | { |
| 1130 | u32 sts; |
| 1131 | unsigned long flags; |
| 1132 | |
| 1133 | spin_lock_irqsave(&iommu->register_lock, flags); |
David Woodhouse | c416daa | 2009-05-10 20:30:58 +0100 | [diff] [blame] | 1134 | iommu->gcmd |= DMA_GCMD_TE; |
| 1135 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1136 | |
| 1137 | 	/* Make sure the hardware completes it */
| 1138 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
David Woodhouse | c416daa | 2009-05-10 20:30:58 +0100 | [diff] [blame] | 1139 | readl, (sts & DMA_GSTS_TES), sts); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1140 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1141 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
| 1142 | return 0; |
| 1143 | } |
| 1144 | |
| 1145 | static int iommu_disable_translation(struct intel_iommu *iommu) |
| 1146 | { |
| 1147 | u32 sts; |
| 1148 | unsigned long flag; |
| 1149 | |
| 1150 | spin_lock_irqsave(&iommu->register_lock, flag); |
| 1151 | iommu->gcmd &= ~DMA_GCMD_TE; |
| 1152 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
| 1153 | |
| 1154 | 	/* Make sure the hardware completes it */
| 1155 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, |
David Woodhouse | c416daa | 2009-05-10 20:30:58 +0100 | [diff] [blame] | 1156 | readl, (!(sts & DMA_GSTS_TES)), sts); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1157 | |
| 1158 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 1159 | return 0; |
| 1160 | } |
| 1161 | |
Keshavamurthy, Anil S | 3460a6d | 2007-10-21 16:41:54 -0700 | [diff] [blame] | 1162 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1163 | static int iommu_init_domains(struct intel_iommu *iommu) |
| 1164 | { |
| 1165 | unsigned long ndomains; |
| 1166 | unsigned long nlongs; |
| 1167 | |
| 1168 | ndomains = cap_ndoms(iommu->cap); |
| 1169 | 	pr_debug("Number of domains supported <%ld>\n", ndomains);
| 1170 | nlongs = BITS_TO_LONGS(ndomains); |
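	/* e.g. with cap_ndoms() == 256, nlongs is 4 on a 64-bit kernel */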
| 1171 | |
Donald Dutile | 94a91b5 | 2009-08-20 16:51:34 -0400 | [diff] [blame] | 1172 | spin_lock_init(&iommu->lock); |
| 1173 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1174 | 	/* TBD: there might be 64K domains;
| 1175 | 	 * consider a different allocation scheme for future chips.
| 1176 | */ |
| 1177 | iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); |
| 1178 | if (!iommu->domain_ids) { |
| 1179 | printk(KERN_ERR "Allocating domain id array failed\n"); |
| 1180 | return -ENOMEM; |
| 1181 | } |
| 1182 | iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), |
| 1183 | GFP_KERNEL); |
| 1184 | if (!iommu->domains) { |
| 1185 | printk(KERN_ERR "Allocating domain array failed\n"); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1186 | return -ENOMEM; |
| 1187 | } |
| 1188 | |
| 1189 | /* |
| 1190 | 	 * If caching mode is set, then invalid translations are tagged
| 1191 | 	 * with domain id 0. Hence we need to pre-allocate it.
| 1192 | */ |
| 1193 | if (cap_caching_mode(iommu->cap)) |
| 1194 | set_bit(0, iommu->domain_ids); |
| 1195 | return 0; |
| 1196 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1197 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1198 | |
| 1199 | static void domain_exit(struct dmar_domain *domain); |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 1200 | static void vm_domain_exit(struct dmar_domain *domain); |
Suresh Siddha | e61d98d | 2008-07-10 11:16:35 -0700 | [diff] [blame] | 1201 | |
| 1202 | void free_dmar_iommu(struct intel_iommu *iommu) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1203 | { |
| 1204 | struct dmar_domain *domain; |
| 1205 | int i; |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1206 | unsigned long flags; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1207 | |
Donald Dutile | 94a91b5 | 2009-08-20 16:51:34 -0400 | [diff] [blame] | 1208 | if ((iommu->domains) && (iommu->domain_ids)) { |
| 1209 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); |
| 1210 | for (; i < cap_ndoms(iommu->cap); ) { |
| 1211 | domain = iommu->domains[i]; |
| 1212 | clear_bit(i, iommu->domain_ids); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1213 | |
Donald Dutile | 94a91b5 | 2009-08-20 16:51:34 -0400 | [diff] [blame] | 1214 | spin_lock_irqsave(&domain->iommu_lock, flags); |
| 1215 | if (--domain->iommu_count == 0) { |
| 1216 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) |
| 1217 | vm_domain_exit(domain); |
| 1218 | else |
| 1219 | domain_exit(domain); |
| 1220 | } |
| 1221 | spin_unlock_irqrestore(&domain->iommu_lock, flags); |
| 1222 | |
| 1223 | i = find_next_bit(iommu->domain_ids, |
| 1224 | cap_ndoms(iommu->cap), i+1); |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 1225 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1226 | } |
| 1227 | |
| 1228 | if (iommu->gcmd & DMA_GCMD_TE) |
| 1229 | iommu_disable_translation(iommu); |
| 1230 | |
| 1231 | if (iommu->irq) { |
| 1232 | set_irq_data(iommu->irq, NULL); |
| 1233 | /* This will mask the irq */ |
| 1234 | free_irq(iommu->irq, iommu); |
| 1235 | destroy_irq(iommu->irq); |
| 1236 | } |
| 1237 | |
| 1238 | kfree(iommu->domains); |
| 1239 | kfree(iommu->domain_ids); |
| 1240 | |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 1241 | g_iommus[iommu->seq_id] = NULL; |
| 1242 | |
| 1243 | /* if all iommus are freed, free g_iommus */ |
| 1244 | for (i = 0; i < g_num_of_iommus; i++) { |
| 1245 | if (g_iommus[i]) |
| 1246 | break; |
| 1247 | } |
| 1248 | |
| 1249 | if (i == g_num_of_iommus) |
| 1250 | kfree(g_iommus); |
| 1251 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1252 | /* free context mapping */ |
| 1253 | free_context_table(iommu); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1254 | } |
| 1255 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1256 | static struct dmar_domain *alloc_domain(void) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1257 | { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1258 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1259 | |
| 1260 | domain = alloc_domain_mem(); |
| 1261 | if (!domain) |
| 1262 | return NULL; |
| 1263 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1264 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
Weidong Han | d71a2f3 | 2008-12-07 21:13:41 +0800 | [diff] [blame] | 1265 | domain->flags = 0; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1266 | |
| 1267 | return domain; |
| 1268 | } |
| 1269 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1270 | static int iommu_attach_domain(struct dmar_domain *domain, |
| 1271 | struct intel_iommu *iommu) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1272 | { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1273 | int num; |
| 1274 | unsigned long ndomains; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1275 | unsigned long flags; |
| 1276 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1277 | ndomains = cap_ndoms(iommu->cap); |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1278 | |
| 1279 | spin_lock_irqsave(&iommu->lock, flags); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1280 | |
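	/*
	 * Allocate the first free domain id on this IOMMU; the id indexes both
	 * the domain_ids bitmap and the domains[] pointer array, and the
	 * domain records the attachment in its iommu_bmp.
	 */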
| 1281 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
| 1282 | if (num >= ndomains) { |
| 1283 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1284 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
| 1285 | return -ENOMEM; |
| 1286 | } |
| 1287 | |
| 1288 | domain->id = num; |
| 1289 | set_bit(num, iommu->domain_ids); |
| 1290 | set_bit(iommu->seq_id, &domain->iommu_bmp); |
| 1291 | iommu->domains[num] = domain; |
| 1292 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1293 | |
| 1294 | return 0; |
| 1295 | } |
| 1296 | |
| 1297 | static void iommu_detach_domain(struct dmar_domain *domain, |
| 1298 | struct intel_iommu *iommu) |
| 1299 | { |
| 1300 | unsigned long flags; |
| 1301 | int num, ndomains; |
| 1302 | int found = 0; |
| 1303 | |
| 1304 | spin_lock_irqsave(&iommu->lock, flags); |
| 1305 | ndomains = cap_ndoms(iommu->cap); |
| 1306 | num = find_first_bit(iommu->domain_ids, ndomains); |
| 1307 | for (; num < ndomains; ) { |
| 1308 | if (iommu->domains[num] == domain) { |
| 1309 | found = 1; |
| 1310 | break; |
| 1311 | } |
| 1312 | num = find_next_bit(iommu->domain_ids, |
| 1313 | cap_ndoms(iommu->cap), num+1); |
| 1314 | } |
| 1315 | |
| 1316 | if (found) { |
| 1317 | clear_bit(num, iommu->domain_ids); |
| 1318 | clear_bit(iommu->seq_id, &domain->iommu_bmp); |
| 1319 | iommu->domains[num] = NULL; |
| 1320 | } |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1321 | spin_unlock_irqrestore(&iommu->lock, flags); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1322 | } |
| 1323 | |
| 1324 | static struct iova_domain reserved_iova_list; |
Mark Gross | 8a443df | 2008-03-04 14:59:31 -0800 | [diff] [blame] | 1325 | static struct lock_class_key reserved_rbtree_key; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1326 | |
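/*
 * Pre-populate the reserved IOVA tree with ranges the allocator must never
 * hand out: the IOAPIC MMIO window and every memory BAR of every PCI device,
 * so that DMA mappings cannot alias device MMIO.
 */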
| 1327 | static void dmar_init_reserved_ranges(void) |
| 1328 | { |
| 1329 | struct pci_dev *pdev = NULL; |
| 1330 | struct iova *iova; |
| 1331 | int i; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1332 | |
David Miller | f661197 | 2008-02-06 01:36:23 -0800 | [diff] [blame] | 1333 | init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1334 | |
Mark Gross | 8a443df | 2008-03-04 14:59:31 -0800 | [diff] [blame] | 1335 | lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, |
| 1336 | &reserved_rbtree_key); |
| 1337 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1338 | /* IOAPIC ranges shouldn't be accessed by DMA */ |
| 1339 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), |
| 1340 | IOVA_PFN(IOAPIC_RANGE_END)); |
| 1341 | if (!iova) |
| 1342 | printk(KERN_ERR "Reserve IOAPIC range failed\n"); |
| 1343 | |
| 1344 | /* Reserve all PCI MMIO to avoid peer-to-peer access */ |
| 1345 | for_each_pci_dev(pdev) { |
| 1346 | struct resource *r; |
| 1347 | |
| 1348 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { |
| 1349 | r = &pdev->resource[i]; |
| 1350 | if (!r->flags || !(r->flags & IORESOURCE_MEM)) |
| 1351 | continue; |
David Woodhouse | 1a4a455 | 2009-06-28 16:00:42 +0100 | [diff] [blame] | 1352 | iova = reserve_iova(&reserved_iova_list, |
| 1353 | IOVA_PFN(r->start), |
| 1354 | IOVA_PFN(r->end)); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1355 | if (!iova) |
| 1356 | printk(KERN_ERR "Reserve iova failed\n"); |
| 1357 | } |
| 1358 | } |
| 1359 | |
| 1360 | } |
| 1361 | |
| 1362 | static void domain_reserve_special_ranges(struct dmar_domain *domain) |
| 1363 | { |
| 1364 | copy_reserved_iova(&reserved_iova_list, &domain->iovad); |
| 1365 | } |
| 1366 | |
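/*
 * Round the guest address width up to the nearest width the page-table
 * format can express (12 bits of page offset plus a multiple of 9 bits per
 * level), capped at 64. For example, gaw = 39 stays 39, while gaw = 40 is
 * adjusted up to 48.
 */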
| 1367 | static inline int guestwidth_to_adjustwidth(int gaw) |
| 1368 | { |
| 1369 | int agaw; |
| 1370 | int r = (gaw - 12) % 9; |
| 1371 | |
| 1372 | if (r == 0) |
| 1373 | agaw = gaw; |
| 1374 | else |
| 1375 | agaw = gaw + 9 - r; |
| 1376 | if (agaw > 64) |
| 1377 | agaw = 64; |
| 1378 | return agaw; |
| 1379 | } |
| 1380 | |
| 1381 | static int domain_init(struct dmar_domain *domain, int guest_width) |
| 1382 | { |
| 1383 | struct intel_iommu *iommu; |
| 1384 | int adjust_width, agaw; |
| 1385 | unsigned long sagaw; |
| 1386 | |
David Miller | f661197 | 2008-02-06 01:36:23 -0800 | [diff] [blame] | 1387 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1388 | spin_lock_init(&domain->iommu_lock); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1389 | |
| 1390 | domain_reserve_special_ranges(domain); |
| 1391 | |
| 1392 | /* calculate AGAW */ |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1393 | iommu = domain_get_iommu(domain); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1394 | if (guest_width > cap_mgaw(iommu->cap)) |
| 1395 | guest_width = cap_mgaw(iommu->cap); |
| 1396 | domain->gaw = guest_width; |
| 1397 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
| 1398 | agaw = width_to_agaw(adjust_width); |
| 1399 | sagaw = cap_sagaw(iommu->cap); |
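	/*
	 * cap_sagaw() is a bitmap (only the low 5 bits are examined below) of
	 * the adjusted guest address widths this IOMMU supports; each set bit
	 * corresponds to a supported page-table depth.
	 */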
| 1400 | if (!test_bit(agaw, &sagaw)) { |
| 1401 | /* hardware doesn't support it, choose a bigger one */ |
| 1402 | pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw); |
| 1403 | agaw = find_next_bit(&sagaw, 5, agaw); |
| 1404 | if (agaw >= 5) |
| 1405 | return -ENODEV; |
| 1406 | } |
| 1407 | domain->agaw = agaw; |
| 1408 | INIT_LIST_HEAD(&domain->devices); |
| 1409 | |
Weidong Han | 8e604097 | 2008-12-08 15:49:06 +0800 | [diff] [blame] | 1410 | if (ecap_coherent(iommu->ecap)) |
| 1411 | domain->iommu_coherency = 1; |
| 1412 | else |
| 1413 | domain->iommu_coherency = 0; |
| 1414 | |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 1415 | if (ecap_sc_support(iommu->ecap)) |
| 1416 | domain->iommu_snooping = 1; |
| 1417 | else |
| 1418 | domain->iommu_snooping = 0; |
| 1419 | |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1420 | domain->iommu_count = 1; |
| 1421 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1422 | /* always allocate the top pgd */ |
| 1423 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
| 1424 | if (!domain->pgd) |
| 1425 | return -ENOMEM; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1426 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1427 | return 0; |
| 1428 | } |
| 1429 | |
| 1430 | static void domain_exit(struct dmar_domain *domain) |
| 1431 | { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1432 | struct dmar_drhd_unit *drhd; |
| 1433 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1434 | |
| 1435 | 	/* Domain 0 is reserved, so don't process it */
| 1436 | if (!domain) |
| 1437 | return; |
| 1438 | |
| 1439 | domain_remove_dev_info(domain); |
| 1440 | /* destroy iovas */ |
| 1441 | put_iova_domain(&domain->iovad); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1442 | |
| 1443 | /* clear ptes */ |
David Woodhouse | 595badf | 2009-06-27 22:09:11 +0100 | [diff] [blame] | 1444 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1445 | |
| 1446 | /* free page tables */ |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 1447 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1448 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1449 | for_each_active_iommu(iommu, drhd) |
| 1450 | if (test_bit(iommu->seq_id, &domain->iommu_bmp)) |
| 1451 | iommu_detach_domain(domain, iommu); |
| 1452 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1453 | free_domain_mem(domain); |
| 1454 | } |
| 1455 | |
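/*
 * Program one context entry: point the (bus, devfn) slot in this IOMMU's
 * context table at the domain's page tables (or mark it pass-through), then
 * perform the invalidations required when a previously not-present entry
 * becomes present.
 */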
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1456 | static int domain_context_mapping_one(struct dmar_domain *domain, int segment, |
| 1457 | u8 bus, u8 devfn, int translation) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1458 | { |
| 1459 | struct context_entry *context; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1460 | unsigned long flags; |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1461 | struct intel_iommu *iommu; |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1462 | struct dma_pte *pgd; |
| 1463 | unsigned long num; |
| 1464 | unsigned long ndomains; |
| 1465 | int id; |
| 1466 | int agaw; |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1467 | struct device_domain_info *info = NULL; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1468 | |
| 1469 | pr_debug("Set context mapping for %02x:%02x.%d\n", |
| 1470 | bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1471 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1472 | BUG_ON(!domain->pgd); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1473 | BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && |
| 1474 | translation != CONTEXT_TT_MULTI_LEVEL); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1475 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1476 | iommu = device_to_iommu(segment, bus, devfn); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1477 | if (!iommu) |
| 1478 | return -ENODEV; |
| 1479 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1480 | context = device_to_context_entry(iommu, bus, devfn); |
| 1481 | if (!context) |
| 1482 | return -ENOMEM; |
| 1483 | spin_lock_irqsave(&iommu->lock, flags); |
Mark McLoughlin | c07e7d2 | 2008-11-21 16:54:46 +0000 | [diff] [blame] | 1484 | if (context_present(context)) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1485 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1486 | return 0; |
| 1487 | } |
| 1488 | |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1489 | id = domain->id; |
| 1490 | pgd = domain->pgd; |
| 1491 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1492 | if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 1493 | domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { |
Weidong Han | ea6606b | 2008-12-08 23:08:15 +0800 | [diff] [blame] | 1494 | int found = 0; |
| 1495 | |
| 1496 | /* find an available domain id for this device in iommu */ |
| 1497 | ndomains = cap_ndoms(iommu->cap); |
| 1498 | num = find_first_bit(iommu->domain_ids, ndomains); |
| 1499 | for (; num < ndomains; ) { |
| 1500 | if (iommu->domains[num] == domain) { |
| 1501 | id = num; |
| 1502 | found = 1; |
| 1503 | break; |
| 1504 | } |
| 1505 | num = find_next_bit(iommu->domain_ids, |
| 1506 | cap_ndoms(iommu->cap), num+1); |
| 1507 | } |
| 1508 | |
| 1509 | if (found == 0) { |
| 1510 | num = find_first_zero_bit(iommu->domain_ids, ndomains); |
| 1511 | if (num >= ndomains) { |
| 1512 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1513 | printk(KERN_ERR "IOMMU: no free domain ids\n"); |
| 1514 | return -EFAULT; |
| 1515 | } |
| 1516 | |
| 1517 | set_bit(num, iommu->domain_ids); |
| 1518 | iommu->domains[num] = domain; |
| 1519 | id = num; |
| 1520 | } |
| 1521 | |
| 1522 | 		/* Skip top levels of the page tables for an
| 1523 | 		 * iommu that has a smaller agaw than the domain's default.
| 1524 | */ |
| 1525 | for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { |
| 1526 | pgd = phys_to_virt(dma_pte_addr(pgd)); |
| 1527 | if (!dma_pte_present(pgd)) { |
| 1528 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 1529 | return -ENOMEM; |
| 1530 | } |
| 1531 | } |
| 1532 | } |
| 1533 | |
| 1534 | context_set_domain_id(context, id); |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1535 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1536 | if (translation != CONTEXT_TT_PASS_THROUGH) { |
| 1537 | info = iommu_support_dev_iotlb(domain, segment, bus, devfn); |
| 1538 | translation = info ? CONTEXT_TT_DEV_IOTLB : |
| 1539 | CONTEXT_TT_MULTI_LEVEL; |
| 1540 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1541 | /* |
| 1542 | 	 * In pass-through mode, AW must be programmed to indicate the largest
| 1543 | 	 * AGAW value supported by hardware, and ASR is ignored by hardware.
| 1544 | */ |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1545 | if (unlikely(translation == CONTEXT_TT_PASS_THROUGH)) |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1546 | context_set_address_width(context, iommu->msagaw); |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1547 | else { |
| 1548 | context_set_address_root(context, virt_to_phys(pgd)); |
| 1549 | context_set_address_width(context, iommu->agaw); |
| 1550 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1551 | |
| 1552 | context_set_translation_type(context, translation); |
Mark McLoughlin | c07e7d2 | 2008-11-21 16:54:46 +0000 | [diff] [blame] | 1553 | context_set_fault_enable(context); |
| 1554 | context_set_present(context); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1555 | domain_flush_cache(domain, context, sizeof(*context)); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1556 | |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1557 | /* |
| 1558 | * It's a non-present to present mapping. If hardware doesn't cache |
| 1559 | 	 * non-present entries we only need to flush the write-buffer. If it
| 1560 | 	 * _does_ cache non-present entries, then it does so in the special
| 1561 | * domain #0, which we have to flush: |
| 1562 | */ |
| 1563 | if (cap_caching_mode(iommu->cap)) { |
| 1564 | iommu->flush.flush_context(iommu, 0, |
| 1565 | (((u16)bus) << 8) | devfn, |
| 1566 | DMA_CCMD_MASK_NOBIT, |
| 1567 | DMA_CCMD_DEVICE_INVL); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1568 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1569 | } else { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1570 | iommu_flush_write_buffer(iommu); |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1571 | } |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1572 | iommu_enable_dev_iotlb(info); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1573 | spin_unlock_irqrestore(&iommu->lock, flags); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1574 | |
| 1575 | spin_lock_irqsave(&domain->iommu_lock, flags); |
| 1576 | if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { |
| 1577 | domain->iommu_count++; |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 1578 | domain_update_iommu_cap(domain); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1579 | } |
| 1580 | spin_unlock_irqrestore(&domain->iommu_lock, flags); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1581 | return 0; |
| 1582 | } |
| 1583 | |
| 1584 | static int |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1585 | domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, |
| 1586 | int translation) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1587 | { |
| 1588 | int ret; |
| 1589 | struct pci_dev *tmp, *parent; |
| 1590 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1591 | ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1592 | pdev->bus->number, pdev->devfn, |
| 1593 | translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1594 | if (ret) |
| 1595 | return ret; |
| 1596 | |
| 1597 | /* dependent device mapping */ |
| 1598 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1599 | if (!tmp) |
| 1600 | return 0; |
| 1601 | /* Secondary interface's bus number and devfn 0 */ |
| 1602 | parent = pdev->bus->self; |
| 1603 | while (parent != tmp) { |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1604 | ret = domain_context_mapping_one(domain, |
| 1605 | pci_domain_nr(parent->bus), |
| 1606 | parent->bus->number, |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1607 | parent->devfn, translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1608 | if (ret) |
| 1609 | return ret; |
| 1610 | parent = parent->bus->self; |
| 1611 | } |
| 1612 | if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ |
| 1613 | return domain_context_mapping_one(domain, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1614 | pci_domain_nr(tmp->subordinate), |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1615 | tmp->subordinate->number, 0, |
| 1616 | translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1617 | else /* this is a legacy PCI bridge */ |
| 1618 | return domain_context_mapping_one(domain, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1619 | pci_domain_nr(tmp->bus), |
| 1620 | tmp->bus->number, |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 1621 | tmp->devfn, |
| 1622 | translation); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1623 | } |
| 1624 | |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1625 | static int domain_context_mapped(struct pci_dev *pdev) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1626 | { |
| 1627 | int ret; |
| 1628 | struct pci_dev *tmp, *parent; |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1629 | struct intel_iommu *iommu; |
| 1630 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1631 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, |
| 1632 | pdev->devfn); |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 1633 | if (!iommu) |
| 1634 | return -ENODEV; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1635 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1636 | ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1637 | if (!ret) |
| 1638 | return ret; |
| 1639 | /* dependent device mapping */ |
| 1640 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1641 | if (!tmp) |
| 1642 | return ret; |
| 1643 | /* Secondary interface's bus number and devfn 0 */ |
| 1644 | parent = pdev->bus->self; |
| 1645 | while (parent != tmp) { |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1646 | ret = device_context_mapped(iommu, parent->bus->number, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1647 | parent->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1648 | if (!ret) |
| 1649 | return ret; |
| 1650 | parent = parent->bus->self; |
| 1651 | } |
| 1652 | if (tmp->is_pcie) |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1653 | return device_context_mapped(iommu, tmp->subordinate->number, |
| 1654 | 0); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1655 | else |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1656 | return device_context_mapped(iommu, tmp->bus->number, |
| 1657 | tmp->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1658 | } |
| 1659 | |
Fenghua Yu | f532959 | 2009-08-04 15:09:37 -0700 | [diff] [blame] | 1660 | /* Returns a number of VTD pages, but aligned to MM page size */ |
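/*
 * Illustrative example with 4KiB MM and VT-d pages:
 * aligned_nrpages(0x1234, 0x2000) keeps only the sub-page offset (0x234),
 * rounds 0x234 + 0x2000 up to 0x3000, and returns 3 pages.
 */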
| 1661 | static inline unsigned long aligned_nrpages(unsigned long host_addr, |
| 1662 | size_t size) |
| 1663 | { |
| 1664 | host_addr &= ~PAGE_MASK; |
| 1665 | return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; |
| 1666 | } |
| 1667 | |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1668 | static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
| 1669 | struct scatterlist *sg, unsigned long phys_pfn, |
| 1670 | unsigned long nr_pages, int prot) |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1671 | { |
| 1672 | struct dma_pte *first_pte = NULL, *pte = NULL; |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1673 | phys_addr_t uninitialized_var(pteval); |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1674 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1675 | unsigned long sg_res; |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1676 | |
| 1677 | BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); |
| 1678 | |
| 1679 | if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) |
| 1680 | return -EINVAL; |
| 1681 | |
| 1682 | prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; |
| 1683 | |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1684 | if (sg) |
| 1685 | sg_res = 0; |
| 1686 | else { |
| 1687 | sg_res = nr_pages + 1; |
| 1688 | pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; |
| 1689 | } |
| 1690 | |
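	/*
	 * Walk nr_pages page frames. sg_res counts the pages remaining in the
	 * current physically contiguous chunk; when it reaches zero the next
	 * scatterlist entry is started (for the contiguous, non-sg case it is
	 * primed above so that never happens). PTEs are written with a local
	 * cmpxchg and flushed in runs bounded by page-table pages.
	 */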
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1691 | while (nr_pages--) { |
David Woodhouse | c85994e | 2009-07-01 19:21:24 +0100 | [diff] [blame] | 1692 | uint64_t tmp; |
| 1693 | |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1694 | if (!sg_res) { |
Fenghua Yu | f532959 | 2009-08-04 15:09:37 -0700 | [diff] [blame] | 1695 | sg_res = aligned_nrpages(sg->offset, sg->length); |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1696 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; |
| 1697 | sg->dma_length = sg->length; |
| 1698 | pteval = page_to_phys(sg_page(sg)) | prot; |
| 1699 | } |
| 1700 | if (!pte) { |
| 1701 | first_pte = pte = pfn_to_dma_pte(domain, iov_pfn); |
| 1702 | if (!pte) |
| 1703 | return -ENOMEM; |
| 1704 | } |
| 1705 | 		/* We don't need a lock here; nobody else
| 1706 | 		 * touches this iova range
| 1707 | */ |
David Woodhouse | 7766a3f | 2009-07-01 20:27:03 +0100 | [diff] [blame] | 1708 | tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); |
David Woodhouse | c85994e | 2009-07-01 19:21:24 +0100 | [diff] [blame] | 1709 | if (tmp) { |
David Woodhouse | 1bf20f0 | 2009-06-29 22:06:43 +0100 | [diff] [blame] | 1710 | static int dumps = 5; |
David Woodhouse | c85994e | 2009-07-01 19:21:24 +0100 | [diff] [blame] | 1711 | printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", |
| 1712 | iov_pfn, tmp, (unsigned long long)pteval); |
David Woodhouse | 1bf20f0 | 2009-06-29 22:06:43 +0100 | [diff] [blame] | 1713 | if (dumps) { |
| 1714 | dumps--; |
| 1715 | debug_dma_dump_mappings(NULL); |
| 1716 | } |
| 1717 | WARN_ON(1); |
| 1718 | } |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1719 | pte++; |
David Woodhouse | 75e6bf9 | 2009-07-02 11:21:16 +0100 | [diff] [blame] | 1720 | if (!nr_pages || first_pte_in_page(pte)) { |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 1721 | domain_flush_cache(domain, first_pte, |
| 1722 | (void *)pte - (void *)first_pte); |
| 1723 | pte = NULL; |
| 1724 | } |
| 1725 | iov_pfn++; |
| 1726 | pteval += VTD_PAGE_SIZE; |
| 1727 | sg_res--; |
| 1728 | if (!sg_res) |
| 1729 | sg = sg_next(sg); |
| 1730 | } |
| 1731 | return 0; |
| 1732 | } |
| 1733 | |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1734 | static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
| 1735 | struct scatterlist *sg, unsigned long nr_pages, |
| 1736 | int prot) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1737 | { |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1738 | return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); |
| 1739 | } |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 1740 | |
David Woodhouse | 9051aa0 | 2009-06-29 12:30:54 +0100 | [diff] [blame] | 1741 | static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
| 1742 | unsigned long phys_pfn, unsigned long nr_pages, |
| 1743 | int prot) |
| 1744 | { |
| 1745 | return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1746 | } |
| 1747 | |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1748 | static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1749 | { |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1750 | if (!iommu) |
| 1751 | return; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 1752 | |
| 1753 | clear_context_table(iommu, bus, devfn); |
| 1754 | iommu->flush.flush_context(iommu, 0, 0, 0, |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 1755 | DMA_CCMD_GLOBAL_INVL); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 1756 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1757 | } |
| 1758 | |
| 1759 | static void domain_remove_dev_info(struct dmar_domain *domain) |
| 1760 | { |
| 1761 | struct device_domain_info *info; |
| 1762 | unsigned long flags; |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1763 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1764 | |
| 1765 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1766 | while (!list_empty(&domain->devices)) { |
| 1767 | info = list_entry(domain->devices.next, |
| 1768 | struct device_domain_info, link); |
| 1769 | list_del(&info->link); |
| 1770 | list_del(&info->global); |
| 1771 | if (info->dev) |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1772 | info->dev->dev.archdata.iommu = NULL; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1773 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1774 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 1775 | iommu_disable_dev_iotlb(info); |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1776 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 1777 | iommu_detach_dev(iommu, info->bus, info->devfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1778 | free_devinfo_mem(info); |
| 1779 | |
| 1780 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1781 | } |
| 1782 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1783 | } |
| 1784 | |
| 1785 | /* |
| 1786 | * find_domain |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1787 |  * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1788 | */ |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 1789 | static struct dmar_domain * |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1790 | find_domain(struct pci_dev *pdev) |
| 1791 | { |
| 1792 | struct device_domain_info *info; |
| 1793 | |
| 1794 | /* No lock here, assumes no domain exit in normal case */ |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1795 | info = pdev->dev.archdata.iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1796 | if (info) |
| 1797 | return info->domain; |
| 1798 | return NULL; |
| 1799 | } |
| 1800 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1801 | /* domain is initialized */ |
| 1802 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) |
| 1803 | { |
| 1804 | struct dmar_domain *domain, *found = NULL; |
| 1805 | struct intel_iommu *iommu; |
| 1806 | struct dmar_drhd_unit *drhd; |
| 1807 | struct device_domain_info *info, *tmp; |
| 1808 | struct pci_dev *dev_tmp; |
| 1809 | unsigned long flags; |
| 1810 | int bus = 0, devfn = 0; |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1811 | int segment; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1812 | int ret; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1813 | |
| 1814 | domain = find_domain(pdev); |
| 1815 | if (domain) |
| 1816 | return domain; |
| 1817 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1818 | segment = pci_domain_nr(pdev->bus); |
| 1819 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1820 | dev_tmp = pci_find_upstream_pcie_bridge(pdev); |
| 1821 | if (dev_tmp) { |
| 1822 | if (dev_tmp->is_pcie) { |
| 1823 | bus = dev_tmp->subordinate->number; |
| 1824 | devfn = 0; |
| 1825 | } else { |
| 1826 | bus = dev_tmp->bus->number; |
| 1827 | devfn = dev_tmp->devfn; |
| 1828 | } |
| 1829 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1830 | list_for_each_entry(info, &device_domain_list, global) { |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1831 | if (info->segment == segment && |
| 1832 | info->bus == bus && info->devfn == devfn) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1833 | found = info->domain; |
| 1834 | break; |
| 1835 | } |
| 1836 | } |
| 1837 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1838 | 		/* pcie-pci bridge already has a domain, use it */
| 1839 | if (found) { |
| 1840 | domain = found; |
| 1841 | goto found_domain; |
| 1842 | } |
| 1843 | } |
| 1844 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1845 | domain = alloc_domain(); |
| 1846 | if (!domain) |
| 1847 | goto error; |
| 1848 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1849 | /* Allocate new domain for the device */ |
| 1850 | drhd = dmar_find_matched_drhd_unit(pdev); |
| 1851 | if (!drhd) { |
| 1852 | printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", |
| 1853 | pci_name(pdev)); |
| 1854 | return NULL; |
| 1855 | } |
| 1856 | iommu = drhd->iommu; |
| 1857 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1858 | ret = iommu_attach_domain(domain, iommu); |
| 1859 | if (ret) { |
| 1860 | domain_exit(domain); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1861 | goto error; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1862 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1863 | |
| 1864 | if (domain_init(domain, gaw)) { |
| 1865 | domain_exit(domain); |
| 1866 | goto error; |
| 1867 | } |
| 1868 | |
| 1869 | /* register pcie-to-pci device */ |
| 1870 | if (dev_tmp) { |
| 1871 | info = alloc_devinfo_mem(); |
| 1872 | if (!info) { |
| 1873 | domain_exit(domain); |
| 1874 | goto error; |
| 1875 | } |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1876 | info->segment = segment; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1877 | info->bus = bus; |
| 1878 | info->devfn = devfn; |
| 1879 | info->dev = NULL; |
| 1880 | info->domain = domain; |
| 1881 | /* This domain is shared by devices under p2p bridge */ |
Weidong Han | 3b5410e | 2008-12-08 09:17:15 +0800 | [diff] [blame] | 1882 | domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1883 | |
| 1884 | 		/* pcie-to-pci bridge already has a domain, use it */
| 1885 | found = NULL; |
| 1886 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1887 | list_for_each_entry(tmp, &device_domain_list, global) { |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1888 | if (tmp->segment == segment && |
| 1889 | tmp->bus == bus && tmp->devfn == devfn) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1890 | found = tmp->domain; |
| 1891 | break; |
| 1892 | } |
| 1893 | } |
| 1894 | if (found) { |
| 1895 | free_devinfo_mem(info); |
| 1896 | domain_exit(domain); |
| 1897 | domain = found; |
| 1898 | } else { |
| 1899 | list_add(&info->link, &domain->devices); |
| 1900 | list_add(&info->global, &device_domain_list); |
| 1901 | } |
| 1902 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1903 | } |
| 1904 | |
| 1905 | found_domain: |
| 1906 | info = alloc_devinfo_mem(); |
| 1907 | if (!info) |
| 1908 | goto error; |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 1909 | info->segment = segment; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1910 | info->bus = pdev->bus->number; |
| 1911 | info->devfn = pdev->devfn; |
| 1912 | info->dev = pdev; |
| 1913 | info->domain = domain; |
| 1914 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1915 | 	/* somebody else raced with us and set it up first */
| 1916 | found = find_domain(pdev); |
| 1917 | if (found != NULL) { |
| 1918 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1919 | if (found != domain) { |
| 1920 | domain_exit(domain); |
| 1921 | domain = found; |
| 1922 | } |
| 1923 | free_devinfo_mem(info); |
| 1924 | return domain; |
| 1925 | } |
| 1926 | list_add(&info->link, &domain->devices); |
| 1927 | list_add(&info->global, &device_domain_list); |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 1928 | pdev->dev.archdata.iommu = info; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1929 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 1930 | return domain; |
| 1931 | error: |
| 1932 | /* recheck it here, maybe others set it */ |
| 1933 | return find_domain(pdev); |
| 1934 | } |
| 1935 | |
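/*
 * Identity-mapping policy for the static 1:1 (si) domain: 0 disables it,
 * 1 applies it to every eligible device, and 2 restricts it to graphics
 * devices (see iommu_should_identity_map() below).
 */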
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 1936 | static int iommu_identity_mapping; |
| 1937 | |
David Woodhouse | b213203 | 2009-06-26 18:50:28 +0100 | [diff] [blame] | 1938 | static int iommu_domain_identity_map(struct dmar_domain *domain, |
| 1939 | unsigned long long start, |
| 1940 | unsigned long long end) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1941 | { |
David Woodhouse | c5395d5 | 2009-06-28 16:35:56 +0100 | [diff] [blame] | 1942 | unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; |
| 1943 | unsigned long last_vpfn = end >> VTD_PAGE_SHIFT; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1944 | |
David Woodhouse | c5395d5 | 2009-06-28 16:35:56 +0100 | [diff] [blame] | 1945 | if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), |
| 1946 | dma_to_mm_pfn(last_vpfn))) { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1947 | printk(KERN_ERR "IOMMU: reserve iova failed\n"); |
David Woodhouse | b213203 | 2009-06-26 18:50:28 +0100 | [diff] [blame] | 1948 | return -ENOMEM; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1949 | } |
| 1950 | |
David Woodhouse | c5395d5 | 2009-06-28 16:35:56 +0100 | [diff] [blame] | 1951 | pr_debug("Mapping reserved region %llx-%llx for domain %d\n", |
| 1952 | start, end, domain->id); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1953 | /* |
| 1954 | 	 * The RMRR range might overlap a physical memory range;
| 1955 | 	 * clear it first.
| 1956 | */ |
David Woodhouse | c5395d5 | 2009-06-28 16:35:56 +0100 | [diff] [blame] | 1957 | dma_pte_clear_range(domain, first_vpfn, last_vpfn); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 1958 | |
David Woodhouse | c5395d5 | 2009-06-28 16:35:56 +0100 | [diff] [blame] | 1959 | return domain_pfn_mapping(domain, first_vpfn, first_vpfn, |
| 1960 | last_vpfn - first_vpfn + 1, |
David Woodhouse | 61df744 | 2009-06-28 11:55:58 +0100 | [diff] [blame] | 1961 | DMA_PTE_READ|DMA_PTE_WRITE); |
David Woodhouse | b213203 | 2009-06-26 18:50:28 +0100 | [diff] [blame] | 1962 | } |
| 1963 | |
| 1964 | static int iommu_prepare_identity_map(struct pci_dev *pdev, |
| 1965 | unsigned long long start, |
| 1966 | unsigned long long end) |
| 1967 | { |
| 1968 | struct dmar_domain *domain; |
| 1969 | int ret; |
| 1970 | |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 1971 | domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); |
David Woodhouse | b213203 | 2009-06-26 18:50:28 +0100 | [diff] [blame] | 1972 | if (!domain) |
| 1973 | return -ENOMEM; |
| 1974 | |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 1975 | /* For _hardware_ passthrough, don't bother. But for software |
| 1976 | passthrough, we do it anyway -- it may indicate a memory |
| 1977 | 	   range which is reserved in E820 and so didn't get set
| 1978 | 	   up in si_domain to start with */
| 1979 | if (domain == si_domain && hw_pass_through) { |
| 1980 | 		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
| 1981 | pci_name(pdev), start, end); |
| 1982 | return 0; |
| 1983 | } |
| 1984 | |
| 1985 | printk(KERN_INFO |
| 1986 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", |
| 1987 | pci_name(pdev), start, end); |
David Woodhouse | 2ff729f | 2009-08-26 14:25:41 +0100 | [diff] [blame] | 1988 | |
| 1989 | if (end >> agaw_to_width(domain->agaw)) { |
| 1990 | WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" |
| 1991 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", |
| 1992 | agaw_to_width(domain->agaw), |
| 1993 | dmi_get_system_info(DMI_BIOS_VENDOR), |
| 1994 | dmi_get_system_info(DMI_BIOS_VERSION), |
| 1995 | dmi_get_system_info(DMI_PRODUCT_VERSION)); |
| 1996 | ret = -EIO; |
| 1997 | goto error; |
| 1998 | } |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 1999 | |
David Woodhouse | b213203 | 2009-06-26 18:50:28 +0100 | [diff] [blame] | 2000 | ret = iommu_domain_identity_map(domain, start, end); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2001 | if (ret) |
| 2002 | goto error; |
| 2003 | |
| 2004 | /* context entry init */ |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2005 | ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); |
David Woodhouse | b213203 | 2009-06-26 18:50:28 +0100 | [diff] [blame] | 2006 | if (ret) |
| 2007 | goto error; |
| 2008 | |
| 2009 | return 0; |
| 2010 | |
| 2011 | error: |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2012 | domain_exit(domain); |
| 2013 | return ret; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2014 | } |
| 2015 | |
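/*
 * RMRRs (Reserved Memory Region Reporting structures in the DMAR table)
 * describe memory a device is already using for DMA before the OS takes
 * over (USB legacy keyboard buffers are the classic example), so each such
 * range gets an identity mapping in the device's domain.
 */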
| 2016 | static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, |
| 2017 | struct pci_dev *pdev) |
| 2018 | { |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 2019 | if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2020 | return 0; |
| 2021 | return iommu_prepare_identity_map(pdev, rmrr->base_address, |
| 2022 | rmrr->end_address + 1); |
| 2023 | } |
| 2024 | |
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 2025 | #ifdef CONFIG_DMAR_FLOPPY_WA |
| 2026 | static inline void iommu_prepare_isa(void) |
| 2027 | { |
| 2028 | struct pci_dev *pdev; |
| 2029 | int ret; |
| 2030 | |
| 2031 | pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); |
| 2032 | if (!pdev) |
| 2033 | return; |
| 2034 | |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2035 | printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); |
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 2036 | ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); |
| 2037 | |
| 2038 | if (ret) |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2039 | printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " |
| 2040 | "floppy might not work\n"); |
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 2041 | |
| 2042 | } |
| 2043 | #else |
| 2044 | static inline void iommu_prepare_isa(void) |
| 2045 | { |
| 2046 | return; |
| 2047 | } |
| 2048 | #endif /* !CONFIG_DMAR_FLOPPY_WA */
| 2049 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2050 | static int md_domain_init(struct dmar_domain *domain, int guest_width); |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2051 | |
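/*
 * The si (static identity) domain 1:1-maps all usable physical memory and is
 * shared by every identity-mapped device; si_domain_work_fn() is invoked for
 * each active memory region to install those mappings, unless hardware
 * pass-through makes them unnecessary.
 */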
| 2052 | static int __init si_domain_work_fn(unsigned long start_pfn, |
| 2053 | unsigned long end_pfn, void *datax) |
| 2054 | { |
| 2055 | int *ret = datax; |
| 2056 | |
| 2057 | *ret = iommu_domain_identity_map(si_domain, |
| 2058 | (uint64_t)start_pfn << PAGE_SHIFT, |
| 2059 | (uint64_t)end_pfn << PAGE_SHIFT); |
| 2060 | return *ret; |
| 2061 | |
| 2062 | } |
| 2063 | |
Matt Kraai | 071e137 | 2009-08-23 22:30:22 -0700 | [diff] [blame] | 2064 | static int __init si_domain_init(int hw) |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2065 | { |
| 2066 | struct dmar_drhd_unit *drhd; |
| 2067 | struct intel_iommu *iommu; |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2068 | int nid, ret = 0; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2069 | |
| 2070 | si_domain = alloc_domain(); |
| 2071 | if (!si_domain) |
| 2072 | return -EFAULT; |
| 2073 | |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2074 | pr_debug("Identity mapping domain is domain %d\n", si_domain->id); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2075 | |
| 2076 | for_each_active_iommu(iommu, drhd) { |
| 2077 | ret = iommu_attach_domain(si_domain, iommu); |
| 2078 | if (ret) { |
| 2079 | domain_exit(si_domain); |
| 2080 | return -EFAULT; |
| 2081 | } |
| 2082 | } |
| 2083 | |
| 2084 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
| 2085 | domain_exit(si_domain); |
| 2086 | return -EFAULT; |
| 2087 | } |
| 2088 | |
| 2089 | si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; |
| 2090 | |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2091 | if (hw) |
| 2092 | return 0; |
| 2093 | |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2094 | for_each_online_node(nid) { |
| 2095 | work_with_active_regions(nid, si_domain_work_fn, &ret); |
| 2096 | if (ret) |
| 2097 | return ret; |
| 2098 | } |
| 2099 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2100 | return 0; |
| 2101 | } |
| 2102 | |
| 2103 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
| 2104 | struct pci_dev *pdev); |
| 2105 | static int identity_mapping(struct pci_dev *pdev) |
| 2106 | { |
| 2107 | struct device_domain_info *info; |
| 2108 | |
| 2109 | if (likely(!iommu_identity_mapping)) |
| 2110 | return 0; |
| 2111 | |
| 2113 | list_for_each_entry(info, &si_domain->devices, link) |
| 2114 | if (info->dev == pdev) |
| 2115 | return 1; |
| 2116 | return 0; |
| 2117 | } |
| 2118 | |
| 2119 | static int domain_add_dev_info(struct dmar_domain *domain, |
David Woodhouse | 5fe60f4 | 2009-08-09 10:53:41 +0100 | [diff] [blame] | 2120 | struct pci_dev *pdev, |
| 2121 | int translation) |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2122 | { |
| 2123 | struct device_domain_info *info; |
| 2124 | unsigned long flags; |
David Woodhouse | 5fe60f4 | 2009-08-09 10:53:41 +0100 | [diff] [blame] | 2125 | int ret; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2126 | |
| 2127 | info = alloc_devinfo_mem(); |
| 2128 | if (!info) |
| 2129 | return -ENOMEM; |
| 2130 | |
David Woodhouse | 5fe60f4 | 2009-08-09 10:53:41 +0100 | [diff] [blame] | 2131 | ret = domain_context_mapping(domain, pdev, translation); |
| 2132 | if (ret) { |
| 2133 | free_devinfo_mem(info); |
| 2134 | return ret; |
| 2135 | } |
| 2136 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2137 | info->segment = pci_domain_nr(pdev->bus); |
| 2138 | info->bus = pdev->bus->number; |
| 2139 | info->devfn = pdev->devfn; |
| 2140 | info->dev = pdev; |
| 2141 | info->domain = domain; |
| 2142 | |
| 2143 | spin_lock_irqsave(&device_domain_lock, flags); |
| 2144 | list_add(&info->link, &domain->devices); |
| 2145 | list_add(&info->global, &device_domain_list); |
| 2146 | pdev->dev.archdata.iommu = info; |
| 2147 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 2148 | |
| 2149 | return 0; |
| 2150 | } |
| 2151 | |
David Woodhouse | 6941af2 | 2009-07-04 18:24:27 +0100 | [diff] [blame] | 2152 | static int iommu_should_identity_map(struct pci_dev *pdev, int startup) |
| 2153 | { |
| 2154 | if (iommu_identity_mapping == 2) |
| 2155 | return IS_GFX_DEVICE(pdev); |
| 2156 | |
David Woodhouse | 3dfc813 | 2009-07-04 19:11:08 +0100 | [diff] [blame] | 2157 | /* |
| 2158 | * We want to start off with all devices in the 1:1 domain, and |
| 2159 | * take them out later if we find they can't access all of memory. |
| 2160 | * |
| 2161 | * However, we can't do this for PCI devices behind bridges, |
| 2162 | * because all PCI devices behind the same bridge will end up |
| 2163 | * with the same source-id on their transactions. |
| 2164 | * |
| 2165 | * Practically speaking, we can't change things around for these |
| 2166 | * devices at run-time, because we can't be sure there'll be no |
| 2167 | * DMA transactions in flight for any of their siblings. |
| 2168 | * |
| 2169 | * So PCI devices (unless they're on the root bus) as well as |
| 2170 | * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of |
| 2171 | * the 1:1 domain, just in _case_ one of their siblings turns out |
| 2172 | * not to be able to map all of memory. |
| 2173 | */ |
| 2174 | if (!pdev->is_pcie) { |
| 2175 | if (!pci_is_root_bus(pdev->bus)) |
| 2176 | return 0; |
| 2177 | if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) |
| 2178 | return 0; |
| 2179 | } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) |
| 2180 | return 0; |
| 2181 | |
| 2182 | /* |
| 2183 | * At boot time, we don't yet know if devices will be 64-bit capable. |
| 2184 | * Assume that they will -- if they turn out not to be, then we can |
| 2185 | * take them out of the 1:1 domain later. |
| 2186 | */ |
David Woodhouse | 6941af2 | 2009-07-04 18:24:27 +0100 | [diff] [blame] | 2187 | if (!startup) |
| 2188 | return pdev->dma_mask > DMA_BIT_MASK(32); |
| 2189 | |
| 2190 | return 1; |
| 2191 | } |
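/*
 * Editorial sketch (not part of the driver): the source-id aliasing
 * described in the comment above can be made concrete.  For a
 * conventional PCI device behind a PCIe-to-PCI bridge, DMA requests
 * arrive tagged with the bridge's secondary bus number and devfn 0;
 * behind a legacy PCI bridge they carry the bridge's own bus/devfn.
 * The helper below is hypothetical and only illustrates how that
 * effective requester ID would be computed, mirroring the bridge
 * walking done elsewhere in this file.
 */
#if 0
static void example_show_effective_source_id(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_find_upstream_pcie_bridge(pdev);

	if (!bridge)			/* device issues its own source-id */
		printk(KERN_DEBUG "%s: source-id %02x:%02x.%d\n",
		       pci_name(pdev), pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	else if (bridge->is_pcie)	/* PCIe-to-PCI: secondary bus, devfn 0 */
		printk(KERN_DEBUG "%s: source-id %02x:00.0 (aliased)\n",
		       pci_name(pdev), bridge->subordinate->number);
	else				/* legacy PCI bridge: its own bus/devfn */
		printk(KERN_DEBUG "%s: source-id %02x:%02x.%d (aliased)\n",
		       pci_name(pdev), bridge->bus->number,
		       PCI_SLOT(bridge->devfn), PCI_FUNC(bridge->devfn));
}
#endif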
| 2192 | |
Matt Kraai | 071e137 | 2009-08-23 22:30:22 -0700 | [diff] [blame] | 2193 | static int __init iommu_prepare_static_identity_mapping(int hw) |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2194 | { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2195 | struct pci_dev *pdev = NULL; |
| 2196 | int ret; |
| 2197 | |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2198 | ret = si_domain_init(hw); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2199 | if (ret) |
| 2200 | return -EFAULT; |
| 2201 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2202 | for_each_pci_dev(pdev) { |
David Woodhouse | 6941af2 | 2009-07-04 18:24:27 +0100 | [diff] [blame] | 2203 | if (iommu_should_identity_map(pdev, 1)) { |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2204 | printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", |
| 2205 | hw ? "hardware" : "software", pci_name(pdev)); |
David Woodhouse | c7ab48d | 2009-06-26 19:10:36 +0100 | [diff] [blame] | 2206 | |
David Woodhouse | 5fe60f4 | 2009-08-09 10:53:41 +0100 | [diff] [blame] | 2207 | ret = domain_add_dev_info(si_domain, pdev, |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2208 | hw ? CONTEXT_TT_PASS_THROUGH : |
David Woodhouse | 62edf5d | 2009-07-04 10:59:46 +0100 | [diff] [blame] | 2209 | CONTEXT_TT_MULTI_LEVEL); |
| 2210 | if (ret) |
| 2211 | return ret; |
David Woodhouse | 62edf5d | 2009-07-04 10:59:46 +0100 | [diff] [blame] | 2212 | } |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2213 | } |
| 2214 | |
| 2215 | return 0; |
| 2216 | } |
| 2217 | |
| 2218 | int __init init_dmars(void) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2219 | { |
| 2220 | struct dmar_drhd_unit *drhd; |
| 2221 | struct dmar_rmrr_unit *rmrr; |
| 2222 | struct pci_dev *pdev; |
| 2223 | struct intel_iommu *iommu; |
Suresh Siddha | 9d783ba | 2009-03-16 17:04:55 -0700 | [diff] [blame] | 2224 | int i, ret; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2225 | |
| 2226 | /* |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2227 | * for each drhd |
| 2228 | * allocate root |
| 2229 | * initialize and program root entry to not present |
| 2230 | * endfor |
| 2231 | */ |
| 2232 | for_each_drhd_unit(drhd) { |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2233 | g_num_of_iommus++; |
| 2234 | /* |
| 2235 | * lock not needed as this is only incremented in the single- |
| 2236 | * threaded kernel __init code path; all other accesses |
| 2237 | * are read-only |
| 2238 | */ |
| 2239 | } |
| 2240 | |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2241 | g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), |
| 2242 | GFP_KERNEL); |
| 2243 | if (!g_iommus) { |
| 2244 | printk(KERN_ERR "Allocating global iommu array failed\n"); |
| 2245 | ret = -ENOMEM; |
| 2246 | goto error; |
| 2247 | } |
| 2248 | |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2249 | deferred_flush = kzalloc(g_num_of_iommus * |
| 2250 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
| 2251 | if (!deferred_flush) { |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2252 | ret = -ENOMEM; |
| 2253 | goto error; |
| 2254 | } |
| 2255 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2256 | for_each_drhd_unit(drhd) { |
| 2257 | if (drhd->ignored) |
| 2258 | continue; |
Suresh Siddha | 1886e8a | 2008-07-10 11:16:37 -0700 | [diff] [blame] | 2259 | |
| 2260 | iommu = drhd->iommu; |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2261 | g_iommus[iommu->seq_id] = iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2262 | |
Suresh Siddha | e61d98d | 2008-07-10 11:16:35 -0700 | [diff] [blame] | 2263 | ret = iommu_init_domains(iommu); |
| 2264 | if (ret) |
| 2265 | goto error; |
| 2266 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2267 | /* |
| 2268 | * TBD: |
| 2269 | * we could share the same root & context tables |
| 2270 | * among all IOMMUs. Need to split it later. |
| 2271 | */ |
| 2272 | ret = iommu_alloc_root_entry(iommu); |
| 2273 | if (ret) { |
| 2274 | printk(KERN_ERR "IOMMU: allocate root entry failed\n"); |
| 2275 | goto error; |
| 2276 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2277 | if (!ecap_pass_through(iommu->ecap)) |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2278 | hw_pass_through = 0; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2279 | } |
| 2280 | |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 2281 | /* |
| 2282 | * Start from a sane IOMMU hardware state. |
| 2283 | */ |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2284 | for_each_drhd_unit(drhd) { |
| 2285 | if (drhd->ignored) |
| 2286 | continue; |
| 2287 | |
| 2288 | iommu = drhd->iommu; |
Suresh Siddha | 1531a6a | 2009-03-16 17:04:57 -0700 | [diff] [blame] | 2289 | |
| 2290 | /* |
| 2291 | * If queued invalidation is already initialized by us |
| 2292 | * (for example, while enabling interrupt-remapping), then |
| 2293 | * things are already rolling from a sane state. |
| 2294 | */ |
| 2295 | if (iommu->qi) |
| 2296 | continue; |
| 2297 | |
| 2298 | /* |
| 2299 | * Clear any previous faults. |
| 2300 | */ |
| 2301 | dmar_fault(-1, iommu); |
| 2302 | /* |
| 2303 | * Disable queued invalidation if supported and already enabled |
| 2304 | * before OS handover. |
| 2305 | */ |
| 2306 | dmar_disable_qi(iommu); |
| 2307 | } |
| 2308 | |
| 2309 | for_each_drhd_unit(drhd) { |
| 2310 | if (drhd->ignored) |
| 2311 | continue; |
| 2312 | |
| 2313 | iommu = drhd->iommu; |
| 2314 | |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2315 | if (dmar_enable_qi(iommu)) { |
| 2316 | /* |
| 2317 | * Queued Invalidate not enabled, use Register Based |
| 2318 | * Invalidate |
| 2319 | */ |
| 2320 | iommu->flush.flush_context = __iommu_flush_context; |
| 2321 | iommu->flush.flush_iotlb = __iommu_flush_iotlb; |
| 2322 | printk(KERN_INFO "IOMMU 0x%Lx: using Register based " |
FUJITA Tomonori | b4e0f9e | 2008-11-19 13:53:42 +0900 | [diff] [blame] | 2323 | "invalidation\n", |
| 2324 | (unsigned long long)drhd->reg_base_addr); |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2325 | } else { |
| 2326 | iommu->flush.flush_context = qi_flush_context; |
| 2327 | iommu->flush.flush_iotlb = qi_flush_iotlb; |
| 2328 | printk(KERN_INFO "IOMMU 0x%Lx: using Queued " |
FUJITA Tomonori | b4e0f9e | 2008-11-19 13:53:42 +0900 | [diff] [blame] | 2329 | "invalidation\n", |
| 2330 | (unsigned long long)drhd->reg_base_addr); |
Youquan Song | a77b67d | 2008-10-16 16:31:56 -0700 | [diff] [blame] | 2331 | } |
| 2332 | } |
| 2333 | |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2334 | if (iommu_pass_through) |
| 2335 | iommu_identity_mapping = 1; |
| 2336 | #ifdef CONFIG_DMAR_BROKEN_GFX_WA |
| 2337 | else |
| 2338 | iommu_identity_mapping = 2; |
| 2339 | #endif |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2340 | /* |
| 2341 | * If pass-through is not set or not enabled, set up context entries |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2342 | * for identity mappings for RMRR, gfx and ISA, and possibly fall back |
| 2343 | * to the static identity mapping if iommu_identity_mapping is set. |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2344 | */ |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2345 | if (iommu_identity_mapping) { |
| 2346 | ret = iommu_prepare_static_identity_mapping(hw_pass_through); |
| 2347 | if (ret) { |
| 2348 | printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); |
| 2349 | goto error; |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2350 | } |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2351 | } |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 2352 | /* |
| 2353 | * For each rmrr |
| 2354 | * for each dev attached to rmrr |
| 2355 | * do |
| 2356 | * locate drhd for dev, alloc domain for dev |
| 2357 | * allocate free domain |
| 2358 | * allocate page table entries for rmrr |
| 2359 | * if context not allocated for bus |
| 2360 | * allocate and init context |
| 2361 | * set present in root table for this bus |
| 2362 | * init context with domain, translation etc |
| 2363 | * endfor |
| 2364 | * endfor |
| 2365 | */ |
| 2366 | printk(KERN_INFO "IOMMU: Setting RMRR:\n"); |
| 2367 | for_each_rmrr_units(rmrr) { |
| 2368 | for (i = 0; i < rmrr->devices_cnt; i++) { |
| 2369 | pdev = rmrr->devices[i]; |
| 2370 | /* |
| 2371 | * some BIOSes list non-existent devices in the |
| 2372 | * DMAR table. |
| 2373 | */ |
| 2374 | if (!pdev) |
| 2375 | continue; |
| 2376 | ret = iommu_prepare_rmrr_dev(rmrr, pdev); |
| 2377 | if (ret) |
| 2378 | printk(KERN_ERR |
| 2379 | "IOMMU: mapping reserved region failed\n"); |
| 2380 | } |
| 2381 | } |
| 2382 | |
| 2383 | iommu_prepare_isa(); |
Keshavamurthy, Anil S | 49a0429 | 2007-10-21 16:41:57 -0700 | [diff] [blame] | 2384 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2385 | /* |
| 2386 | * for each drhd |
| 2387 | * enable fault log |
| 2388 | * global invalidate context cache |
| 2389 | * global invalidate iotlb |
| 2390 | * enable translation |
| 2391 | */ |
| 2392 | for_each_drhd_unit(drhd) { |
| 2393 | if (drhd->ignored) |
| 2394 | continue; |
| 2395 | iommu = drhd->iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2396 | |
| 2397 | iommu_flush_write_buffer(iommu); |
| 2398 | |
Keshavamurthy, Anil S | 3460a6d | 2007-10-21 16:41:54 -0700 | [diff] [blame] | 2399 | ret = dmar_set_interrupt(iommu); |
| 2400 | if (ret) |
| 2401 | goto error; |
| 2402 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2403 | iommu_set_root_entry(iommu); |
| 2404 | |
David Woodhouse | 4c25a2c | 2009-05-10 17:16:06 +0100 | [diff] [blame] | 2405 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2406 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); |
mark gross | f8bab73 | 2008-02-08 04:18:38 -0800 | [diff] [blame] | 2407 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2408 | ret = iommu_enable_translation(iommu); |
| 2409 | if (ret) |
| 2410 | goto error; |
David Woodhouse | b94996c | 2009-09-19 15:28:12 -0700 | [diff] [blame] | 2411 | |
| 2412 | iommu_disable_protect_mem_regions(iommu); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2413 | } |
| 2414 | |
| 2415 | return 0; |
| 2416 | error: |
| 2417 | for_each_drhd_unit(drhd) { |
| 2418 | if (drhd->ignored) |
| 2419 | continue; |
| 2420 | iommu = drhd->iommu; |
| 2421 | free_iommu(iommu); |
| 2422 | } |
Weidong Han | d9630fe | 2008-12-08 11:06:32 +0800 | [diff] [blame] | 2423 | kfree(g_iommus); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2424 | return ret; |
| 2425 | } |
| 2426 | |
David Woodhouse | 5a5e02a | 2009-07-04 09:35:44 +0100 | [diff] [blame] | 2427 | /* This takes a number of _MM_ pages, not VTD pages */ |
David Woodhouse | 875764d | 2009-06-28 21:20:51 +0100 | [diff] [blame] | 2428 | static struct iova *intel_alloc_iova(struct device *dev, |
| 2429 | struct dmar_domain *domain, |
| 2430 | unsigned long nrpages, uint64_t dma_mask) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2431 | { |
| 2432 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2433 | struct iova *iova = NULL; |
| 2434 | |
David Woodhouse | 875764d | 2009-06-28 21:20:51 +0100 | [diff] [blame] | 2435 | /* Restrict dma_mask to the width that the iommu can handle */ |
| 2436 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); |
| 2437 | |
| 2438 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2439 | /* |
| 2440 | * First try to allocate an I/O virtual address below |
Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 2441 | * DMA_BIT_MASK(32); if that fails, try allocating |
Joe Perches | 3609801 | 2007-12-17 11:40:11 -0800 | [diff] [blame] | 2442 | * from the higher range. |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2443 | */ |
David Woodhouse | 875764d | 2009-06-28 21:20:51 +0100 | [diff] [blame] | 2444 | iova = alloc_iova(&domain->iovad, nrpages, |
| 2445 | IOVA_PFN(DMA_BIT_MASK(32)), 1); |
| 2446 | if (iova) |
| 2447 | return iova; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2448 | } |
David Woodhouse | 875764d | 2009-06-28 21:20:51 +0100 | [diff] [blame] | 2449 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); |
| 2450 | if (unlikely(!iova)) { |
| 2451 | printk(KERN_ERR "Allocating %ld-page iova for %s failed\n", |
| 2452 | nrpages, pci_name(pdev)); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2453 | return NULL; |
| 2454 | } |
| 2455 | |
| 2456 | return iova; |
| 2457 | } |
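/*
 * Editorial note with a worked example (not part of the driver): the
 * "_MM_ pages, not VTD pages" convention above matters whenever
 * PAGE_SIZE differs from VTD_PAGE_SIZE (4KiB).  On a hypothetical
 * 16KiB-page kernel one MM page spans four VT-d pages, so callers
 * first compute the VT-d page count and then convert before calling
 * this allocator, as the mapping paths below do:
 *
 *	size = aligned_nrpages(paddr, len);		// VT-d pages
 *	iova = intel_alloc_iova(dev, domain,
 *				dma_to_mm_pfn(size), dma_mask);
 *
 * On x86, where both page sizes are 4KiB, the conversion is a no-op.
 */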
| 2458 | |
David Woodhouse | 147202a | 2009-07-07 19:43:20 +0100 | [diff] [blame] | 2459 | static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2460 | { |
| 2461 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2462 | int ret; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2463 | |
| 2464 | domain = get_domain_for_dev(pdev, |
| 2465 | DEFAULT_DOMAIN_ADDRESS_WIDTH); |
| 2466 | if (!domain) { |
| 2467 | printk(KERN_ERR |
| 2468 | "Allocating domain for %s failed", pci_name(pdev)); |
Al Viro | 4fe05bb | 2007-10-29 04:51:16 +0000 | [diff] [blame] | 2469 | return NULL; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2470 | } |
| 2471 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2472 | /* make sure context mapping is ok */ |
Weidong Han | 5331fe6 | 2008-12-08 23:00:00 +0800 | [diff] [blame] | 2473 | if (unlikely(!domain_context_mapped(pdev))) { |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 2474 | ret = domain_context_mapping(domain, pdev, |
| 2475 | CONTEXT_TT_MULTI_LEVEL); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2476 | if (ret) { |
| 2477 | printk(KERN_ERR |
| 2478 | "Domain context map for %s failed", |
| 2479 | pci_name(pdev)); |
Al Viro | 4fe05bb | 2007-10-29 04:51:16 +0000 | [diff] [blame] | 2480 | return NULL; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2481 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2482 | } |
| 2483 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2484 | return domain; |
| 2485 | } |
| 2486 | |
David Woodhouse | 147202a | 2009-07-07 19:43:20 +0100 | [diff] [blame] | 2487 | static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) |
| 2488 | { |
| 2489 | struct device_domain_info *info; |
| 2490 | |
| 2491 | /* No lock here, assumes no domain exit in normal case */ |
| 2492 | info = dev->dev.archdata.iommu; |
| 2493 | if (likely(info)) |
| 2494 | return info->domain; |
| 2495 | |
| 2496 | return __get_valid_domain_for_dev(dev); |
| 2497 | } |
| 2498 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2499 | static int iommu_dummy(struct pci_dev *pdev) |
| 2500 | { |
| 2501 | return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; |
| 2502 | } |
| 2503 | |
| 2504 | /* Check if the pdev needs to go through the non-identity map/unmap process. */ |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2505 | static int iommu_no_mapping(struct device *dev) |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2506 | { |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2507 | struct pci_dev *pdev; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2508 | int found; |
| 2509 | |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2510 | if (unlikely(dev->bus != &pci_bus_type)) |
| 2511 | return 1; |
| 2512 | |
| 2513 | pdev = to_pci_dev(dev); |
David Woodhouse | 1e4c64c | 2009-07-04 10:40:38 +0100 | [diff] [blame] | 2514 | if (iommu_dummy(pdev)) |
| 2515 | return 1; |
| 2516 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2517 | if (!iommu_identity_mapping) |
David Woodhouse | 1e4c64c | 2009-07-04 10:40:38 +0100 | [diff] [blame] | 2518 | return 0; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2519 | |
| 2520 | found = identity_mapping(pdev); |
| 2521 | if (found) { |
David Woodhouse | 6941af2 | 2009-07-04 18:24:27 +0100 | [diff] [blame] | 2522 | if (iommu_should_identity_map(pdev, 0)) |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2523 | return 1; |
| 2524 | else { |
| 2525 | /* |
| 2526 | * A device limited to 32-bit DMA is removed from si_domain |
| 2527 | * and falls back to non-identity mapping. |
| 2528 | */ |
| 2529 | domain_remove_one_dev_info(si_domain, pdev); |
| 2530 | printk(KERN_INFO "32bit %s uses non-identity mapping\n", |
| 2531 | pci_name(pdev)); |
| 2532 | return 0; |
| 2533 | } |
| 2534 | } else { |
| 2535 | /* |
| 2536 | * When a 64-bit DMA capable device is detached from a VM, it |
| 2537 | * is put into si_domain for identity mapping. |
| 2538 | */ |
David Woodhouse | 6941af2 | 2009-07-04 18:24:27 +0100 | [diff] [blame] | 2539 | if (iommu_should_identity_map(pdev, 0)) { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2540 | int ret; |
David Woodhouse | 5fe60f4 | 2009-08-09 10:53:41 +0100 | [diff] [blame] | 2541 | ret = domain_add_dev_info(si_domain, pdev, |
| 2542 | hw_pass_through ? |
| 2543 | CONTEXT_TT_PASS_THROUGH : |
| 2544 | CONTEXT_TT_MULTI_LEVEL); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2545 | if (!ret) { |
| 2546 | printk(KERN_INFO "64bit %s uses identity mapping\n", |
| 2547 | pci_name(pdev)); |
| 2548 | return 1; |
| 2549 | } |
| 2550 | } |
| 2551 | } |
| 2552 | |
David Woodhouse | 1e4c64c | 2009-07-04 10:40:38 +0100 | [diff] [blame] | 2553 | return 0; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2554 | } |
| 2555 | |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2556 | static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, |
| 2557 | size_t size, int dir, u64 dma_mask) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2558 | { |
| 2559 | struct pci_dev *pdev = to_pci_dev(hwdev); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2560 | struct dmar_domain *domain; |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2561 | phys_addr_t start_paddr; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2562 | struct iova *iova; |
| 2563 | int prot = 0; |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2564 | int ret; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2565 | struct intel_iommu *iommu; |
Fenghua Yu | 33041ec | 2009-08-04 15:10:59 -0700 | [diff] [blame] | 2566 | unsigned long paddr_pfn = paddr >> PAGE_SHIFT; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2567 | |
| 2568 | BUG_ON(dir == DMA_NONE); |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2569 | |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2570 | if (iommu_no_mapping(hwdev)) |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2571 | return paddr; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2572 | |
| 2573 | domain = get_valid_domain_for_dev(pdev); |
| 2574 | if (!domain) |
| 2575 | return 0; |
| 2576 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2577 | iommu = domain_get_iommu(domain); |
David Woodhouse | 88cb6a7 | 2009-06-28 15:03:06 +0100 | [diff] [blame] | 2578 | size = aligned_nrpages(paddr, size); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2579 | |
David Woodhouse | 5a5e02a | 2009-07-04 09:35:44 +0100 | [diff] [blame] | 2580 | iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), |
| 2581 | pdev->dma_mask); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2582 | if (!iova) |
| 2583 | goto error; |
| 2584 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2585 | /* |
| 2586 | * Check if DMAR supports zero-length reads on write-only |
| 2587 | * mappings. |
| 2588 | */ |
| 2589 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2590 | !cap_zlr(iommu->cap)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2591 | prot |= DMA_PTE_READ; |
| 2592 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
| 2593 | prot |= DMA_PTE_WRITE; |
| 2594 | /* |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2595 | * paddr ~ paddr + size might cover only part of a page; we should map |
| 2596 | * the whole page. Note: if two parts of one page are separately |
Ingo Molnar | 6865f0d | 2008-04-22 11:09:04 +0200 | [diff] [blame] | 2597 | * mapped, we might have two guest addresses mapping to the same host |
| 2598 | * paddr, but this is not a big problem. |
| 2599 | */ |
David Woodhouse | 0ab36de | 2009-06-28 14:01:43 +0100 | [diff] [blame] | 2600 | ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), |
Fenghua Yu | 33041ec | 2009-08-04 15:10:59 -0700 | [diff] [blame] | 2601 | mm_to_dma_pfn(paddr_pfn), size, prot); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2602 | if (ret) |
| 2603 | goto error; |
| 2604 | |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2605 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 2606 | if (cap_caching_mode(iommu->cap)) |
David Woodhouse | 03d6a24 | 2009-06-28 15:33:46 +0100 | [diff] [blame] | 2607 | iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2608 | else |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2609 | iommu_flush_write_buffer(iommu); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2610 | |
David Woodhouse | 03d6a24 | 2009-06-28 15:33:46 +0100 | [diff] [blame] | 2611 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; |
| 2612 | start_paddr += paddr & ~PAGE_MASK; |
| 2613 | return start_paddr; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2614 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2615 | error: |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2616 | if (iova) |
| 2617 | __free_iova(&domain->iovad, iova); |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2618 | printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n", |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2619 | pci_name(pdev), size, (unsigned long long)paddr, dir); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2620 | return 0; |
| 2621 | } |
| 2622 | |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2623 | static dma_addr_t intel_map_page(struct device *dev, struct page *page, |
| 2624 | unsigned long offset, size_t size, |
| 2625 | enum dma_data_direction dir, |
| 2626 | struct dma_attrs *attrs) |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2627 | { |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2628 | return __intel_map_single(dev, page_to_phys(page) + offset, size, |
| 2629 | dir, to_pci_dev(dev)->dma_mask); |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2630 | } |
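/*
 * Editorial sketch (not part of the driver): once intel_dma_ops is
 * installed as 'dma_ops' in intel_iommu_init(), an ordinary driver's
 * streaming-DMA call reaches intel_map_page() above through the
 * generic DMA API.  The helper and its arguments below are
 * hypothetical.
 */
#if 0
static int example_stream_one_buffer(struct pci_dev *pdev,
				     void *buf, size_t len)
{
	dma_addr_t handle;

	/* Dispatches to intel_map_page() via the dma_map_ops table. */
	handle = dma_map_page(&pdev->dev, virt_to_page(buf),
			      offset_in_page(buf), len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;

	/* ... program the device to DMA from 'handle' ... */

	dma_unmap_page(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif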
| 2631 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2632 | static void flush_unmaps(void) |
| 2633 | { |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2634 | int i, j; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2635 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2636 | timer_on = 0; |
| 2637 | |
| 2638 | /* just flush them all */ |
| 2639 | for (i = 0; i < g_num_of_iommus; i++) { |
Weidong Han | a2bb845 | 2008-12-08 11:24:12 +0800 | [diff] [blame] | 2640 | struct intel_iommu *iommu = g_iommus[i]; |
| 2641 | if (!iommu) |
| 2642 | continue; |
Suresh Siddha | c42d9f3 | 2008-07-10 11:16:36 -0700 | [diff] [blame] | 2643 | |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 2644 | if (!deferred_flush[i].next) |
| 2645 | continue; |
| 2646 | |
| 2647 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 2648 | DMA_TLB_GLOBAL_FLUSH); |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 2649 | for (j = 0; j < deferred_flush[i].next; j++) { |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 2650 | unsigned long mask; |
| 2651 | struct iova *iova = deferred_flush[i].iova[j]; |
| 2652 | |
Benjamin LaHaise | 64de5af | 2009-09-16 21:05:55 -0400 | [diff] [blame] | 2653 | mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 2654 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], |
Benjamin LaHaise | 64de5af | 2009-09-16 21:05:55 -0400 | [diff] [blame] | 2655 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 2656 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2657 | } |
Yu Zhao | 9dd2fe8 | 2009-05-18 13:51:36 +0800 | [diff] [blame] | 2658 | deferred_flush[i].next = 0; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2659 | } |
| 2660 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2661 | list_size = 0; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2662 | } |
| 2663 | |
| 2664 | static void flush_unmaps_timeout(unsigned long data) |
| 2665 | { |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2666 | unsigned long flags; |
| 2667 | |
| 2668 | spin_lock_irqsave(&async_umap_flush_lock, flags); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2669 | flush_unmaps(); |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2670 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2671 | } |
| 2672 | |
| 2673 | static void add_unmap(struct dmar_domain *dom, struct iova *iova) |
| 2674 | { |
| 2675 | unsigned long flags; |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2676 | int next, iommu_id; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2677 | struct intel_iommu *iommu; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2678 | |
| 2679 | spin_lock_irqsave(&async_umap_flush_lock, flags); |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2680 | if (list_size == HIGH_WATER_MARK) |
| 2681 | flush_unmaps(); |
| 2682 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2683 | iommu = domain_get_iommu(dom); |
| 2684 | iommu_id = iommu->seq_id; |
Suresh Siddha | c42d9f3 | 2008-07-10 11:16:36 -0700 | [diff] [blame] | 2685 | |
mark gross | 80b20dd | 2008-04-18 13:53:58 -0700 | [diff] [blame] | 2686 | next = deferred_flush[iommu_id].next; |
| 2687 | deferred_flush[iommu_id].domain[next] = dom; |
| 2688 | deferred_flush[iommu_id].iova[next] = iova; |
| 2689 | deferred_flush[iommu_id].next++; |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2690 | |
| 2691 | if (!timer_on) { |
| 2692 | mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); |
| 2693 | timer_on = 1; |
| 2694 | } |
| 2695 | list_size++; |
| 2696 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
| 2697 | } |
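/*
 * Editorial note (hedged): the effect of add_unmap()/flush_unmaps()
 * above is that, in the default non-strict mode, IOTLB invalidations
 * are batched per IOMMU.  A queued unmap is released either when the
 * 10 ms unmap_timer fires or as soon as HIGH_WATER_MARK entries have
 * accumulated, whichever comes first, so a stale translation may stay
 * valid for a short window after the unmap call returns.  That window
 * is the trade-off which booting with intel_iommu=strict removes.
 */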
| 2698 | |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2699 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, |
| 2700 | size_t size, enum dma_data_direction dir, |
| 2701 | struct dma_attrs *attrs) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2702 | { |
| 2703 | struct pci_dev *pdev = to_pci_dev(dev); |
| 2704 | struct dmar_domain *domain; |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2705 | unsigned long start_pfn, last_pfn; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2706 | struct iova *iova; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2707 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2708 | |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2709 | if (iommu_no_mapping(dev)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2710 | return; |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 2711 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2712 | domain = find_domain(pdev); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2713 | BUG_ON(!domain); |
| 2714 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2715 | iommu = domain_get_iommu(domain); |
| 2716 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2717 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); |
David Woodhouse | 85b9827 | 2009-07-01 19:27:53 +0100 | [diff] [blame] | 2718 | if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", |
| 2719 | (unsigned long long)dev_addr)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2720 | return; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2721 | |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2722 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); |
| 2723 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2724 | |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2725 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", |
| 2726 | pci_name(pdev), start_pfn, last_pfn); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2727 | |
| 2728 | /* clear the whole page */ |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2729 | dma_pte_clear_range(domain, start_pfn, last_pfn); |
| 2730 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2731 | /* free page tables */ |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2732 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); |
| 2733 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2734 | if (intel_iommu_strict) { |
David Woodhouse | 03d6a24 | 2009-06-28 15:33:46 +0100 | [diff] [blame] | 2735 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2736 | last_pfn - start_pfn + 1); |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2737 | /* free iova */ |
| 2738 | __free_iova(&domain->iovad, iova); |
| 2739 | } else { |
| 2740 | add_unmap(domain, iova); |
| 2741 | /* |
| 2742 | * queue up the release of the unmap to save the roughly 1/6th |
| 2743 | * of the CPU time otherwise spent on the IOTLB flush operation. |
| 2744 | */ |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 2745 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2746 | } |
| 2747 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2748 | static void *intel_alloc_coherent(struct device *hwdev, size_t size, |
| 2749 | dma_addr_t *dma_handle, gfp_t flags) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2750 | { |
| 2751 | void *vaddr; |
| 2752 | int order; |
| 2753 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2754 | size = PAGE_ALIGN(size); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2755 | order = get_order(size); |
| 2756 | flags &= ~(GFP_DMA | GFP_DMA32); |
| 2757 | |
| 2758 | vaddr = (void *)__get_free_pages(flags, order); |
| 2759 | if (!vaddr) |
| 2760 | return NULL; |
| 2761 | memset(vaddr, 0, size); |
| 2762 | |
FUJITA Tomonori | bb9e6d6 | 2008-10-15 16:08:28 +0900 | [diff] [blame] | 2763 | *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, |
| 2764 | DMA_BIDIRECTIONAL, |
| 2765 | hwdev->coherent_dma_mask); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2766 | if (*dma_handle) |
| 2767 | return vaddr; |
| 2768 | free_pages((unsigned long)vaddr, order); |
| 2769 | return NULL; |
| 2770 | } |
| 2771 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2772 | static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
| 2773 | dma_addr_t dma_handle) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2774 | { |
| 2775 | int order; |
| 2776 | |
Fenghua Yu | 5b6985c | 2008-10-16 18:02:32 -0700 | [diff] [blame] | 2777 | size = PAGE_ALIGN(size); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2778 | order = get_order(size); |
| 2779 | |
David Woodhouse | 0db9b7a | 2009-07-14 02:01:57 +0100 | [diff] [blame] | 2780 | intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2781 | free_pages((unsigned long)vaddr, order); |
| 2782 | } |
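/*
 * Editorial sketch (not part of the driver): coherent allocations from
 * a driver reach intel_alloc_coherent()/intel_free_coherent() above via
 * the generic DMA API.  The helper below is hypothetical.
 */
#if 0
static void *example_alloc_ring(struct pci_dev *pdev, size_t len,
				dma_addr_t *handle)
{
	/*
	 * Dispatches to intel_alloc_coherent(); *handle is the
	 * bus/IOVA address the device should be programmed with.
	 */
	return dma_alloc_coherent(&pdev->dev, len, handle, GFP_KERNEL);
}
#endif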
| 2783 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2784 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, |
| 2785 | int nelems, enum dma_data_direction dir, |
| 2786 | struct dma_attrs *attrs) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2787 | { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2788 | struct pci_dev *pdev = to_pci_dev(hwdev); |
| 2789 | struct dmar_domain *domain; |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2790 | unsigned long start_pfn, last_pfn; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2791 | struct iova *iova; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2792 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2793 | |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2794 | if (iommu_no_mapping(hwdev)) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2795 | return; |
| 2796 | |
| 2797 | domain = find_domain(pdev); |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2798 | BUG_ON(!domain); |
| 2799 | |
| 2800 | iommu = domain_get_iommu(domain); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2801 | |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2802 | iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); |
David Woodhouse | 85b9827 | 2009-07-01 19:27:53 +0100 | [diff] [blame] | 2803 | if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", |
| 2804 | (unsigned long long)sglist[0].dma_address)) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2805 | return; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2806 | |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2807 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); |
| 2808 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2809 | |
| 2810 | /* clear the whole page */ |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2811 | dma_pte_clear_range(domain, start_pfn, last_pfn); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2812 | |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 2813 | /* free page tables */ |
| 2814 | dma_pte_free_pagetable(domain, start_pfn, last_pfn); |
| 2815 | |
David Woodhouse | acea001 | 2009-07-14 01:55:11 +0100 | [diff] [blame] | 2816 | if (intel_iommu_strict) { |
| 2817 | iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, |
| 2818 | last_pfn - start_pfn + 1); |
| 2819 | /* free iova */ |
| 2820 | __free_iova(&domain->iovad, iova); |
| 2821 | } else { |
| 2822 | add_unmap(domain, iova); |
| 2823 | /* |
| 2824 | * queue up the release of the unmap to save the roughly 1/6th |
| 2825 | * of the CPU time otherwise spent on the IOTLB flush operation. |
| 2826 | */ |
| 2827 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2828 | } |
| 2829 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2830 | static int intel_nontranslate_map_sg(struct device *hddev, |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2831 | struct scatterlist *sglist, int nelems, int dir) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2832 | { |
| 2833 | int i; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2834 | struct scatterlist *sg; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2835 | |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2836 | for_each_sg(sglist, sg, nelems, i) { |
FUJITA Tomonori | 12d4d40 | 2007-10-23 09:32:25 +0200 | [diff] [blame] | 2837 | BUG_ON(!sg_page(sg)); |
David Woodhouse | 4cf2e75 | 2009-02-11 17:23:43 +0000 | [diff] [blame] | 2838 | sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2839 | sg->dma_length = sg->length; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2840 | } |
| 2841 | return nelems; |
| 2842 | } |
| 2843 | |
FUJITA Tomonori | d7ab5c4 | 2009-01-28 21:53:18 +0900 | [diff] [blame] | 2844 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
| 2845 | enum dma_data_direction dir, struct dma_attrs *attrs) |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2846 | { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2847 | int i; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2848 | struct pci_dev *pdev = to_pci_dev(hwdev); |
| 2849 | struct dmar_domain *domain; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2850 | size_t size = 0; |
| 2851 | int prot = 0; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2853 | struct iova *iova = NULL; |
| 2854 | int ret; |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2855 | struct scatterlist *sg; |
David Woodhouse | b536d24 | 2009-06-28 14:49:31 +0100 | [diff] [blame] | 2856 | unsigned long start_vpfn; |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2857 | struct intel_iommu *iommu; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2858 | |
| 2859 | BUG_ON(dir == DMA_NONE); |
David Woodhouse | 7367683 | 2009-07-04 14:08:36 +0100 | [diff] [blame] | 2860 | if (iommu_no_mapping(hwdev)) |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2861 | return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2862 | |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2863 | domain = get_valid_domain_for_dev(pdev); |
| 2864 | if (!domain) |
| 2865 | return 0; |
| 2866 | |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2867 | iommu = domain_get_iommu(domain); |
| 2868 | |
David Woodhouse | b536d24 | 2009-06-28 14:49:31 +0100 | [diff] [blame] | 2869 | for_each_sg(sglist, sg, nelems, i) |
David Woodhouse | 88cb6a7 | 2009-06-28 15:03:06 +0100 | [diff] [blame] | 2870 | size += aligned_nrpages(sg->offset, sg->length); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2871 | |
David Woodhouse | 5a5e02a | 2009-07-04 09:35:44 +0100 | [diff] [blame] | 2872 | iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), |
| 2873 | pdev->dma_mask); |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2874 | if (!iova) { |
FUJITA Tomonori | c03ab37 | 2007-10-21 16:42:00 -0700 | [diff] [blame] | 2875 | sglist->dma_length = 0; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2876 | return 0; |
| 2877 | } |
| 2878 | |
| 2879 | /* |
| 2880 | * Check if DMAR supports zero-length reads on write-only |
| 2881 | * mappings. |
| 2882 | */ |
| 2883 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2884 | !cap_zlr(iommu->cap)) |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2885 | prot |= DMA_PTE_READ; |
| 2886 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
| 2887 | prot |= DMA_PTE_WRITE; |
| 2888 | |
David Woodhouse | b536d24 | 2009-06-28 14:49:31 +0100 | [diff] [blame] | 2889 | start_vpfn = mm_to_dma_pfn(iova->pfn_lo); |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 2890 | |
Fenghua Yu | f532959 | 2009-08-04 15:09:37 -0700 | [diff] [blame] | 2891 | ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); |
David Woodhouse | e160549 | 2009-06-29 11:17:38 +0100 | [diff] [blame] | 2892 | if (unlikely(ret)) { |
| 2893 | /* clear the page */ |
| 2894 | dma_pte_clear_range(domain, start_vpfn, |
| 2895 | start_vpfn + size - 1); |
| 2896 | /* free page tables */ |
| 2897 | dma_pte_free_pagetable(domain, start_vpfn, |
| 2898 | start_vpfn + size - 1); |
| 2899 | /* free iova */ |
| 2900 | __free_iova(&domain->iovad, iova); |
| 2901 | return 0; |
Keshavamurthy, Anil S | f76aec7 | 2007-10-21 16:41:58 -0700 | [diff] [blame] | 2902 | } |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2903 | |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2904 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 2905 | if (cap_caching_mode(iommu->cap)) |
David Woodhouse | 03d6a24 | 2009-06-28 15:33:46 +0100 | [diff] [blame] | 2906 | iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2907 | else |
Weidong Han | 8c11e79 | 2008-12-08 15:29:22 +0800 | [diff] [blame] | 2908 | iommu_flush_write_buffer(iommu); |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 2909 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2910 | return nelems; |
| 2911 | } |
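/*
 * Editorial sketch (not part of the driver): a typical scatter-gather
 * caller of intel_map_sg() above, going through the generic DMA API.
 * The scatterlist is assumed to have been built already (e.g. with
 * sg_init_table()/sg_set_page()); all names below are hypothetical.
 */
#if 0
static int example_map_sg(struct pci_dev *pdev,
			  struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* Dispatches to intel_map_sg() via the dma_map_ops table. */
	mapped = dma_map_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Hand each DMA address/length pair to the device. */
	for_each_sg(sgl, sg, mapped, i)
		pr_debug("seg %d: 0x%llx + %u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));

	dma_unmap_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
#endif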
| 2912 | |
FUJITA Tomonori | dfb805e | 2009-01-28 21:53:17 +0900 | [diff] [blame] | 2913 | static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) |
| 2914 | { |
| 2915 | return !dma_addr; |
| 2916 | } |
| 2917 | |
FUJITA Tomonori | 160c1d8 | 2009-01-05 23:59:02 +0900 | [diff] [blame] | 2918 | struct dma_map_ops intel_dma_ops = { |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2919 | .alloc_coherent = intel_alloc_coherent, |
| 2920 | .free_coherent = intel_free_coherent, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2921 | .map_sg = intel_map_sg, |
| 2922 | .unmap_sg = intel_unmap_sg, |
FUJITA Tomonori | ffbbef5 | 2009-01-05 23:47:26 +0900 | [diff] [blame] | 2923 | .map_page = intel_map_page, |
| 2924 | .unmap_page = intel_unmap_page, |
FUJITA Tomonori | dfb805e | 2009-01-28 21:53:17 +0900 | [diff] [blame] | 2925 | .mapping_error = intel_mapping_error, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2926 | }; |
| 2927 | |
| 2928 | static inline int iommu_domain_cache_init(void) |
| 2929 | { |
| 2930 | int ret = 0; |
| 2931 | |
| 2932 | iommu_domain_cache = kmem_cache_create("iommu_domain", |
| 2933 | sizeof(struct dmar_domain), |
| 2934 | 0, |
| 2935 | SLAB_HWCACHE_ALIGN, |
| 2937 | NULL); |
| 2938 | if (!iommu_domain_cache) { |
| 2939 | printk(KERN_ERR "Couldn't create iommu_domain cache\n"); |
| 2940 | ret = -ENOMEM; |
| 2941 | } |
| 2942 | |
| 2943 | return ret; |
| 2944 | } |
| 2945 | |
| 2946 | static inline int iommu_devinfo_cache_init(void) |
| 2947 | { |
| 2948 | int ret = 0; |
| 2949 | |
| 2950 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", |
| 2951 | sizeof(struct device_domain_info), |
| 2952 | 0, |
| 2953 | SLAB_HWCACHE_ALIGN, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2954 | NULL); |
| 2955 | if (!iommu_devinfo_cache) { |
| 2956 | printk(KERN_ERR "Couldn't create devinfo cache\n"); |
| 2957 | ret = -ENOMEM; |
| 2958 | } |
| 2959 | |
| 2960 | return ret; |
| 2961 | } |
| 2962 | |
| 2963 | static inline int iommu_iova_cache_init(void) |
| 2964 | { |
| 2965 | int ret = 0; |
| 2966 | |
| 2967 | iommu_iova_cache = kmem_cache_create("iommu_iova", |
| 2968 | sizeof(struct iova), |
| 2969 | 0, |
| 2970 | SLAB_HWCACHE_ALIGN, |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 2971 | NULL); |
| 2972 | if (!iommu_iova_cache) { |
| 2973 | printk(KERN_ERR "Couldn't create iova cache\n"); |
| 2974 | ret = -ENOMEM; |
| 2975 | } |
| 2976 | |
| 2977 | return ret; |
| 2978 | } |
| 2979 | |
| 2980 | static int __init iommu_init_mempool(void) |
| 2981 | { |
| 2982 | int ret; |
| 2983 | ret = iommu_iova_cache_init(); |
| 2984 | if (ret) |
| 2985 | return ret; |
| 2986 | |
| 2987 | ret = iommu_domain_cache_init(); |
| 2988 | if (ret) |
| 2989 | goto domain_error; |
| 2990 | |
| 2991 | ret = iommu_devinfo_cache_init(); |
| 2992 | if (!ret) |
| 2993 | return ret; |
| 2994 | |
| 2995 | kmem_cache_destroy(iommu_domain_cache); |
| 2996 | domain_error: |
| 2997 | kmem_cache_destroy(iommu_iova_cache); |
| 2998 | |
| 2999 | return -ENOMEM; |
| 3000 | } |
| 3001 | |
| 3002 | static void __init iommu_exit_mempool(void) |
| 3003 | { |
| 3004 | kmem_cache_destroy(iommu_devinfo_cache); |
| 3005 | kmem_cache_destroy(iommu_domain_cache); |
| 3006 | kmem_cache_destroy(iommu_iova_cache); |
| 3007 | |
| 3008 | } |
| 3009 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3010 | static void __init init_no_remapping_devices(void) |
| 3011 | { |
| 3012 | struct dmar_drhd_unit *drhd; |
| 3013 | |
| 3014 | for_each_drhd_unit(drhd) { |
| 3015 | if (!drhd->include_all) { |
| 3016 | int i; |
| 3017 | for (i = 0; i < drhd->devices_cnt; i++) |
| 3018 | if (drhd->devices[i] != NULL) |
| 3019 | break; |
| 3020 | /* ignore DMAR unit if no pci devices exist */ |
| 3021 | if (i == drhd->devices_cnt) |
| 3022 | drhd->ignored = 1; |
| 3023 | } |
| 3024 | } |
| 3025 | |
| 3026 | if (dmar_map_gfx) |
| 3027 | return; |
| 3028 | |
| 3029 | for_each_drhd_unit(drhd) { |
| 3030 | int i; |
| 3031 | if (drhd->ignored || drhd->include_all) |
| 3032 | continue; |
| 3033 | |
| 3034 | for (i = 0; i < drhd->devices_cnt; i++) |
| 3035 | if (drhd->devices[i] && |
| 3036 | !IS_GFX_DEVICE(drhd->devices[i])) |
| 3037 | break; |
| 3038 | |
| 3039 | if (i < drhd->devices_cnt) |
| 3040 | continue; |
| 3041 | |
| 3042 | /* bypass IOMMU if it is just for gfx devices */ |
| 3043 | drhd->ignored = 1; |
| 3044 | for (i = 0; i < drhd->devices_cnt; i++) { |
| 3045 | if (!drhd->devices[i]) |
| 3046 | continue; |
Keshavamurthy, Anil S | 358dd8a | 2007-10-21 16:41:59 -0700 | [diff] [blame] | 3047 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3048 | } |
| 3049 | } |
| 3050 | } |
| 3051 | |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3052 | #ifdef CONFIG_SUSPEND |
| 3053 | static int init_iommu_hw(void) |
| 3054 | { |
| 3055 | struct dmar_drhd_unit *drhd; |
| 3056 | struct intel_iommu *iommu = NULL; |
| 3057 | |
| 3058 | for_each_active_iommu(iommu, drhd) |
| 3059 | if (iommu->qi) |
| 3060 | dmar_reenable_qi(iommu); |
| 3061 | |
| 3062 | for_each_active_iommu(iommu, drhd) { |
| 3063 | iommu_flush_write_buffer(iommu); |
| 3064 | |
| 3065 | iommu_set_root_entry(iommu); |
| 3066 | |
| 3067 | iommu->flush.flush_context(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 3068 | DMA_CCMD_GLOBAL_INVL); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3069 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 3070 | DMA_TLB_GLOBAL_FLUSH); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3071 | iommu_enable_translation(iommu); |
David Woodhouse | b94996c | 2009-09-19 15:28:12 -0700 | [diff] [blame] | 3072 | iommu_disable_protect_mem_regions(iommu); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3073 | } |
| 3074 | |
| 3075 | return 0; |
| 3076 | } |
| 3077 | |
| 3078 | static void iommu_flush_all(void) |
| 3079 | { |
| 3080 | struct dmar_drhd_unit *drhd; |
| 3081 | struct intel_iommu *iommu; |
| 3082 | |
| 3083 | for_each_active_iommu(iommu, drhd) { |
| 3084 | iommu->flush.flush_context(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 3085 | DMA_CCMD_GLOBAL_INVL); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3086 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
David Woodhouse | 1f0ef2a | 2009-05-10 19:58:49 +0100 | [diff] [blame] | 3087 | DMA_TLB_GLOBAL_FLUSH); |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3088 | } |
| 3089 | } |
| 3090 | |
| 3091 | static int iommu_suspend(struct sys_device *dev, pm_message_t state) |
| 3092 | { |
| 3093 | struct dmar_drhd_unit *drhd; |
| 3094 | struct intel_iommu *iommu = NULL; |
| 3095 | unsigned long flag; |
| 3096 | |
| 3097 | for_each_active_iommu(iommu, drhd) { |
| 3098 | iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, |
| 3099 | GFP_ATOMIC); |
| 3100 | if (!iommu->iommu_state) |
| 3101 | goto nomem; |
| 3102 | } |
| 3103 | |
| 3104 | iommu_flush_all(); |
| 3105 | |
| 3106 | for_each_active_iommu(iommu, drhd) { |
| 3107 | iommu_disable_translation(iommu); |
| 3108 | |
| 3109 | spin_lock_irqsave(&iommu->register_lock, flag); |
| 3110 | |
| 3111 | iommu->iommu_state[SR_DMAR_FECTL_REG] = |
| 3112 | readl(iommu->reg + DMAR_FECTL_REG); |
| 3113 | iommu->iommu_state[SR_DMAR_FEDATA_REG] = |
| 3114 | readl(iommu->reg + DMAR_FEDATA_REG); |
| 3115 | iommu->iommu_state[SR_DMAR_FEADDR_REG] = |
| 3116 | readl(iommu->reg + DMAR_FEADDR_REG); |
| 3117 | iommu->iommu_state[SR_DMAR_FEUADDR_REG] = |
| 3118 | readl(iommu->reg + DMAR_FEUADDR_REG); |
| 3119 | |
| 3120 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 3121 | } |
| 3122 | return 0; |
| 3123 | |
| 3124 | nomem: |
| 3125 | for_each_active_iommu(iommu, drhd) |
| 3126 | kfree(iommu->iommu_state); |
| 3127 | |
| 3128 | return -ENOMEM; |
| 3129 | } |
| 3130 | |
| 3131 | static int iommu_resume(struct sys_device *dev) |
| 3132 | { |
| 3133 | struct dmar_drhd_unit *drhd; |
| 3134 | struct intel_iommu *iommu = NULL; |
| 3135 | unsigned long flag; |
| 3136 | |
| 3137 | if (init_iommu_hw()) { |
| 3138 | WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); |
| 3139 | return -EIO; |
| 3140 | } |
| 3141 | |
| 3142 | for_each_active_iommu(iommu, drhd) { |
| 3143 | |
| 3144 | spin_lock_irqsave(&iommu->register_lock, flag); |
| 3145 | |
| 3146 | writel(iommu->iommu_state[SR_DMAR_FECTL_REG], |
| 3147 | iommu->reg + DMAR_FECTL_REG); |
| 3148 | writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], |
| 3149 | iommu->reg + DMAR_FEDATA_REG); |
| 3150 | writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], |
| 3151 | iommu->reg + DMAR_FEADDR_REG); |
| 3152 | writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], |
| 3153 | iommu->reg + DMAR_FEUADDR_REG); |
| 3154 | |
| 3155 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
| 3156 | } |
| 3157 | |
| 3158 | for_each_active_iommu(iommu, drhd) |
| 3159 | kfree(iommu->iommu_state); |
| 3160 | |
| 3161 | return 0; |
| 3162 | } |
| 3163 | |
| 3164 | static struct sysdev_class iommu_sysclass = { |
| 3165 | .name = "iommu", |
| 3166 | .resume = iommu_resume, |
| 3167 | .suspend = iommu_suspend, |
| 3168 | }; |
| 3169 | |
| 3170 | static struct sys_device device_iommu = { |
| 3171 | .cls = &iommu_sysclass, |
| 3172 | }; |
| 3173 | |
| 3174 | static int __init init_iommu_sysfs(void) |
| 3175 | { |
| 3176 | int error; |
| 3177 | |
| 3178 | error = sysdev_class_register(&iommu_sysclass); |
| 3179 | if (error) |
| 3180 | return error; |
| 3181 | |
| 3182 | error = sysdev_register(&device_iommu); |
| 3183 | if (error) |
| 3184 | sysdev_class_unregister(&iommu_sysclass); |
| 3185 | |
| 3186 | return error; |
| 3187 | } |
| 3188 | |
| 3189 | #else |
| 3190 | static int __init init_iommu_sysfs(void) |
| 3191 | { |
| 3192 | return 0; |
| 3193 | } |
| 3194 | #endif /* CONFIG_SUSPEND */ |
| 3195 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3196 | int __init intel_iommu_init(void) |
| 3197 | { |
| 3198 | int ret = 0; |
Joseph Cihula | a59b50e | 2009-06-30 19:31:10 -0700 | [diff] [blame] | 3199 | int force_on = 0; |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3200 | |
Joseph Cihula | a59b50e | 2009-06-30 19:31:10 -0700 | [diff] [blame] | 3201 | /* VT-d is required for a TXT/tboot launch, so enforce that */ |
| 3202 | force_on = tboot_force_iommu(); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3203 | |
Joseph Cihula | a59b50e | 2009-06-30 19:31:10 -0700 | [diff] [blame] | 3204 | if (dmar_table_init()) { |
| 3205 | if (force_on) |
| 3206 | panic("tboot: Failed to initialize DMAR table\n"); |
Suresh Siddha | 1886e8a | 2008-07-10 11:16:37 -0700 | [diff] [blame] | 3207 | return -ENODEV; |
Joseph Cihula | a59b50e | 2009-06-30 19:31:10 -0700 | [diff] [blame] | 3208 | } |
| 3209 | |
| 3210 | if (dmar_dev_scope_init()) { |
| 3211 | if (force_on) |
| 3212 | panic("tboot: Failed to initialize DMAR device scope\n"); |
| 3213 | return -ENODEV; |
| 3214 | } |
Suresh Siddha | 1886e8a | 2008-07-10 11:16:37 -0700 | [diff] [blame] | 3215 | |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 3216 | /* |
| 3217 | * Check the need for DMA-remapping initialization now. |
| 3218 | 	 * The initialization above is also used by interrupt remapping.
| 3219 | */ |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 3220 | if (no_iommu || swiotlb || dmar_disabled) |
Suresh Siddha | 2ae2101 | 2008-07-10 11:16:43 -0700 | [diff] [blame] | 3221 | return -ENODEV; |
| 3222 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3223 | iommu_init_mempool(); |
| 3224 | dmar_init_reserved_ranges(); |
| 3225 | |
| 3226 | init_no_remapping_devices(); |
| 3227 | |
| 3228 | ret = init_dmars(); |
| 3229 | if (ret) { |
Joseph Cihula | a59b50e | 2009-06-30 19:31:10 -0700 | [diff] [blame] | 3230 | if (force_on) |
| 3231 | panic("tboot: Failed to initialize DMARs\n"); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3232 | printk(KERN_ERR "IOMMU: dmar init failed\n"); |
| 3233 | put_iova_domain(&reserved_iova_list); |
| 3234 | iommu_exit_mempool(); |
| 3235 | return ret; |
| 3236 | } |
| 3237 | printk(KERN_INFO |
| 3238 | "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); |
| 3239 | |
mark gross | 5e0d2a6 | 2008-03-04 15:22:08 -0800 | [diff] [blame] | 3240 | init_timer(&unmap_timer); |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3241 | force_iommu = 1; |
David Woodhouse | 19943b0 | 2009-08-04 16:19:20 +0100 | [diff] [blame] | 3242 | dma_ops = &intel_dma_ops; |
Fenghua Yu | 4ed0d3e | 2009-04-24 17:30:20 -0700 | [diff] [blame] | 3243 | |
Fenghua Yu | f59c7b6 | 2009-03-27 14:22:42 -0700 | [diff] [blame] | 3244 | init_iommu_sysfs(); |
Joerg Roedel | a8bcbb0d | 2008-12-03 15:14:02 +0100 | [diff] [blame] | 3245 | |
| 3246 | register_iommu(&intel_iommu_ops); |
| 3247 | |
Keshavamurthy, Anil S | ba39592 | 2007-10-21 16:41:49 -0700 | [diff] [blame] | 3248 | return 0; |
| 3249 | } |
Keshavamurthy, Anil S | e820482 | 2007-10-21 16:41:55 -0700 | [diff] [blame] | 3250 | |
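| | /*
| |  * Tear down the context mappings of every bridge upstream of @pdev,
| |  * up to and including its PCIe-to-PCI bridge, if it sits behind one.
| |  */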
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3251 | static void iommu_detach_dependent_devices(struct intel_iommu *iommu, |
| 3252 | struct pci_dev *pdev) |
| 3253 | { |
| 3254 | struct pci_dev *tmp, *parent; |
| 3255 | |
| 3256 | if (!iommu || !pdev) |
| 3257 | return; |
| 3258 | |
| 3259 | /* dependent device detach */ |
| 3260 | tmp = pci_find_upstream_pcie_bridge(pdev); |
| 3261 | /* Secondary interface's bus number and devfn 0 */ |
| 3262 | if (tmp) { |
| 3263 | parent = pdev->bus->self; |
| 3264 | while (parent != tmp) { |
| 3265 | iommu_detach_dev(iommu, parent->bus->number, |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3266 | parent->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3267 | parent = parent->bus->self; |
| 3268 | } |
| 3269 | if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ |
| 3270 | iommu_detach_dev(iommu, |
| 3271 | tmp->subordinate->number, 0); |
| 3272 | else /* this is a legacy PCI bridge */ |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3273 | iommu_detach_dev(iommu, tmp->bus->number, |
| 3274 | tmp->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3275 | } |
| 3276 | } |
| 3277 | |
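| | /*
| |  * Remove @pdev from @domain: unlink its device_domain_info, disable its
| |  * device-IOTLB, detach its context entry, and drop this IOMMU from the
| |  * domain's bitmap when no other device behind it remains in the domain.
| |  */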
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 3278 | static void domain_remove_one_dev_info(struct dmar_domain *domain, |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3279 | struct pci_dev *pdev) |
| 3280 | { |
| 3281 | struct device_domain_info *info; |
| 3282 | struct intel_iommu *iommu; |
| 3283 | unsigned long flags; |
| 3284 | int found = 0; |
| 3285 | struct list_head *entry, *tmp; |
| 3286 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3287 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, |
| 3288 | pdev->devfn); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3289 | if (!iommu) |
| 3290 | return; |
| 3291 | |
| 3292 | spin_lock_irqsave(&device_domain_lock, flags); |
| 3293 | list_for_each_safe(entry, tmp, &domain->devices) { |
| 3294 | info = list_entry(entry, struct device_domain_info, link); |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3295 | /* No need to compare PCI domain; it has to be the same */ |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3296 | if (info->bus == pdev->bus->number && |
| 3297 | info->devfn == pdev->devfn) { |
| 3298 | list_del(&info->link); |
| 3299 | list_del(&info->global); |
| 3300 | if (info->dev) |
| 3301 | info->dev->dev.archdata.iommu = NULL; |
| 3302 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 3303 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 3304 | iommu_disable_dev_iotlb(info); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3305 | iommu_detach_dev(iommu, info->bus, info->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3306 | iommu_detach_dependent_devices(iommu, pdev); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3307 | free_devinfo_mem(info); |
| 3308 | |
| 3309 | spin_lock_irqsave(&device_domain_lock, flags); |
| 3310 | |
| 3311 | if (found) |
| 3312 | break; |
| 3313 | else |
| 3314 | continue; |
| 3315 | } |
| 3316 | |
| 3317 | 		/* if there are no other devices under the same iommu
| 3318 | 		 * owned by this domain, clear this iommu in iommu_bmp,
| 3319 | 		 * and update the iommu count and coherency
| 3320 | 		 */
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3321 | if (iommu == device_to_iommu(info->segment, info->bus, |
| 3322 | info->devfn)) |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3323 | found = 1; |
| 3324 | } |
| 3325 | |
| 3326 | if (found == 0) { |
| 3327 | unsigned long tmp_flags; |
| 3328 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); |
| 3329 | clear_bit(iommu->seq_id, &domain->iommu_bmp); |
| 3330 | domain->iommu_count--; |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 3331 | domain_update_iommu_cap(domain); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3332 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); |
| 3333 | } |
| 3334 | |
| 3335 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| 3336 | } |
| 3337 | |
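| | /* Detach every device from @domain and free its device_domain_info. */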
| 3338 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) |
| 3339 | { |
| 3340 | struct device_domain_info *info; |
| 3341 | struct intel_iommu *iommu; |
| 3342 | unsigned long flags1, flags2; |
| 3343 | |
| 3344 | spin_lock_irqsave(&device_domain_lock, flags1); |
| 3345 | while (!list_empty(&domain->devices)) { |
| 3346 | info = list_entry(domain->devices.next, |
| 3347 | struct device_domain_info, link); |
| 3348 | list_del(&info->link); |
| 3349 | list_del(&info->global); |
| 3350 | if (info->dev) |
| 3351 | info->dev->dev.archdata.iommu = NULL; |
| 3352 | |
| 3353 | spin_unlock_irqrestore(&device_domain_lock, flags1); |
| 3354 | |
Yu Zhao | 93a23a7 | 2009-05-18 13:51:37 +0800 | [diff] [blame] | 3355 | iommu_disable_dev_iotlb(info); |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3356 | iommu = device_to_iommu(info->segment, info->bus, info->devfn); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3357 | iommu_detach_dev(iommu, info->bus, info->devfn); |
Han, Weidong | 3199aa6 | 2009-02-26 17:31:12 +0800 | [diff] [blame] | 3358 | iommu_detach_dependent_devices(iommu, info->dev); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3359 | |
| 3360 | /* clear this iommu in iommu_bmp, update iommu count |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 3361 | * and capabilities |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3362 | */ |
| 3363 | spin_lock_irqsave(&domain->iommu_lock, flags2); |
| 3364 | if (test_and_clear_bit(iommu->seq_id, |
| 3365 | &domain->iommu_bmp)) { |
| 3366 | domain->iommu_count--; |
Sheng Yang | 58c610b | 2009-03-18 15:33:05 +0800 | [diff] [blame] | 3367 | domain_update_iommu_cap(domain); |
Weidong Han | c7151a8 | 2008-12-08 22:51:37 +0800 | [diff] [blame] | 3368 | } |
| 3369 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); |
| 3370 | |
| 3371 | free_devinfo_mem(info); |
| 3372 | spin_lock_irqsave(&device_domain_lock, flags1); |
| 3373 | } |
| 3374 | spin_unlock_irqrestore(&device_domain_lock, flags1); |
| 3375 | } |
| 3376 | |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3377 | /* domain ids for virtual machines; they are never programmed into context entries */
| 3378 | static unsigned long vm_domid; |
| 3379 | |
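| | /* Return the smallest AGAW among the IOMMUs serving this domain. */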
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3380 | static int vm_domain_min_agaw(struct dmar_domain *domain) |
| 3381 | { |
| 3382 | int i; |
| 3383 | int min_agaw = domain->agaw; |
| 3384 | |
| 3385 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); |
| 3386 | for (; i < g_num_of_iommus; ) { |
| 3387 | if (min_agaw > g_iommus[i]->agaw) |
| 3388 | min_agaw = g_iommus[i]->agaw; |
| 3389 | |
| 3390 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); |
| 3391 | } |
| 3392 | |
| 3393 | return min_agaw; |
| 3394 | } |
| 3395 | |
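| | /*
| |  * Allocate a bare dmar_domain for the IOMMU API, with an id from the
| |  * separate vm_domid space and the VIRTUAL_MACHINE flag set.
| |  */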
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3396 | static struct dmar_domain *iommu_alloc_vm_domain(void) |
| 3397 | { |
| 3398 | struct dmar_domain *domain; |
| 3399 | |
| 3400 | domain = alloc_domain_mem(); |
| 3401 | if (!domain) |
| 3402 | return NULL; |
| 3403 | |
| 3404 | domain->id = vm_domid++; |
| 3405 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); |
| 3406 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; |
| 3407 | |
| 3408 | return domain; |
| 3409 | } |
| 3410 | |
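| | /* Basic init for an IOMMU-API domain: iova space, AGAW and top-level pgd. */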
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 3411 | static int md_domain_init(struct dmar_domain *domain, int guest_width) |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3412 | { |
| 3413 | int adjust_width; |
| 3414 | |
| 3415 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3416 | spin_lock_init(&domain->iommu_lock); |
| 3417 | |
| 3418 | domain_reserve_special_ranges(domain); |
| 3419 | |
| 3420 | /* calculate AGAW */ |
| 3421 | domain->gaw = guest_width; |
| 3422 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
| 3423 | domain->agaw = width_to_agaw(adjust_width); |
| 3424 | |
| 3425 | INIT_LIST_HEAD(&domain->devices); |
| 3426 | |
| 3427 | domain->iommu_count = 0; |
| 3428 | domain->iommu_coherency = 0; |
Sheng Yang | c5b1525 | 2009-08-06 13:31:56 +0800 | [diff] [blame] | 3429 | domain->iommu_snooping = 0; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3430 | domain->max_addr = 0; |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3431 | |
| 3432 | /* always allocate the top pgd */ |
| 3433 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); |
| 3434 | if (!domain->pgd) |
| 3435 | return -ENOMEM; |
| 3436 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); |
| 3437 | return 0; |
| 3438 | } |
| 3439 | |
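| | /* Release the hardware domain ids this VM domain holds on each IOMMU. */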
| 3440 | static void iommu_free_vm_domain(struct dmar_domain *domain) |
| 3441 | { |
| 3442 | unsigned long flags; |
| 3443 | struct dmar_drhd_unit *drhd; |
| 3444 | struct intel_iommu *iommu; |
| 3445 | unsigned long i; |
| 3446 | unsigned long ndomains; |
| 3447 | |
| 3448 | for_each_drhd_unit(drhd) { |
| 3449 | if (drhd->ignored) |
| 3450 | continue; |
| 3451 | iommu = drhd->iommu; |
| 3452 | |
| 3453 | ndomains = cap_ndoms(iommu->cap); |
| 3454 | i = find_first_bit(iommu->domain_ids, ndomains); |
| 3455 | for (; i < ndomains; ) { |
| 3456 | if (iommu->domains[i] == domain) { |
| 3457 | spin_lock_irqsave(&iommu->lock, flags); |
| 3458 | clear_bit(i, iommu->domain_ids); |
| 3459 | iommu->domains[i] = NULL; |
| 3460 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 3461 | break; |
| 3462 | } |
| 3463 | i = find_next_bit(iommu->domain_ids, ndomains, i+1); |
| 3464 | } |
| 3465 | } |
| 3466 | } |
| 3467 | |
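| | /* Destroy a VM domain: devices, iovas, page tables and domain ids. */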
| 3468 | static void vm_domain_exit(struct dmar_domain *domain) |
| 3469 | { |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3470 | 	/* Domain 0 is reserved, so don't process it */
| 3471 | if (!domain) |
| 3472 | return; |
| 3473 | |
| 3474 | vm_domain_remove_all_dev_info(domain); |
| 3475 | /* destroy iovas */ |
| 3476 | put_iova_domain(&domain->iovad); |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3477 | |
| 3478 | /* clear ptes */ |
David Woodhouse | 595badf | 2009-06-27 22:09:11 +0100 | [diff] [blame] | 3479 | dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3480 | |
| 3481 | /* free page tables */ |
David Woodhouse | d794dc9 | 2009-06-28 00:27:49 +0100 | [diff] [blame] | 3482 | dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
Weidong Han | 5e98c4b | 2008-12-08 23:03:27 +0800 | [diff] [blame] | 3483 | |
| 3484 | iommu_free_vm_domain(domain); |
| 3485 | free_domain_mem(domain); |
| 3486 | } |
| 3487 | |
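| | /* iommu_ops: back a new struct iommu_domain with a VM dmar_domain. */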
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3488 | static int intel_iommu_domain_init(struct iommu_domain *domain) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3489 | { |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3490 | struct dmar_domain *dmar_domain; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3491 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3492 | dmar_domain = iommu_alloc_vm_domain(); |
| 3493 | if (!dmar_domain) { |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3494 | printk(KERN_ERR |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3495 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
| 3496 | return -ENOMEM; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3497 | } |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 3498 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3499 | printk(KERN_ERR |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3500 | "intel_iommu_domain_init() failed\n"); |
| 3501 | vm_domain_exit(dmar_domain); |
| 3502 | return -ENOMEM; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3503 | } |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3504 | domain->priv = dmar_domain; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3505 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3506 | return 0; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3507 | } |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3508 | |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3509 | static void intel_iommu_domain_destroy(struct iommu_domain *domain) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3510 | { |
Joerg Roedel | 5d45080 | 2008-12-03 14:52:32 +0100 | [diff] [blame] | 3511 | struct dmar_domain *dmar_domain = domain->priv; |
| 3512 | |
| 3513 | domain->priv = NULL; |
| 3514 | vm_domain_exit(dmar_domain); |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3515 | } |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3516 | |
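| | /*
| |  * iommu_ops: attach @dev to @domain, detaching it from any previous
| |  * domain and checking that this IOMMU's AGAW covers the domain's
| |  * highest mapped address.
| |  */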
Joerg Roedel | 4c5478c | 2008-12-03 14:58:24 +0100 | [diff] [blame] | 3517 | static int intel_iommu_attach_device(struct iommu_domain *domain, |
| 3518 | struct device *dev) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3519 | { |
Joerg Roedel | 4c5478c | 2008-12-03 14:58:24 +0100 | [diff] [blame] | 3520 | struct dmar_domain *dmar_domain = domain->priv; |
| 3521 | struct pci_dev *pdev = to_pci_dev(dev); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3522 | struct intel_iommu *iommu; |
| 3523 | int addr_width; |
| 3524 | u64 end; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3525 | |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3526 | /* normally pdev is not mapped */ |
| 3527 | if (unlikely(domain_context_mapped(pdev))) { |
| 3528 | struct dmar_domain *old_domain; |
| 3529 | |
| 3530 | old_domain = find_domain(pdev); |
| 3531 | if (old_domain) { |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 3532 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || |
| 3533 | dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) |
| 3534 | domain_remove_one_dev_info(old_domain, pdev); |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3535 | else |
| 3536 | domain_remove_dev_info(old_domain); |
| 3537 | } |
| 3538 | } |
| 3539 | |
David Woodhouse | 276dbf99 | 2009-04-04 01:45:37 +0100 | [diff] [blame] | 3540 | iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, |
| 3541 | pdev->devfn); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3542 | if (!iommu) |
| 3543 | return -ENODEV; |
| 3544 | |
| 3545 | /* check if this iommu agaw is sufficient for max mapped address */ |
| 3546 | addr_width = agaw_to_width(iommu->agaw); |
| 3547 | end = DOMAIN_MAX_ADDR(addr_width); |
| 3548 | end = end & VTD_PAGE_MASK; |
Joerg Roedel | 4c5478c | 2008-12-03 14:58:24 +0100 | [diff] [blame] | 3549 | if (end < dmar_domain->max_addr) { |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3550 | printk(KERN_ERR "%s: iommu agaw (%d) is not " |
| 3551 | "sufficient for the mapped address (%llx)\n", |
Joerg Roedel | 4c5478c | 2008-12-03 14:58:24 +0100 | [diff] [blame] | 3552 | __func__, iommu->agaw, dmar_domain->max_addr); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3553 | return -EFAULT; |
| 3554 | } |
| 3555 | |
David Woodhouse | 5fe60f4 | 2009-08-09 10:53:41 +0100 | [diff] [blame] | 3556 | return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3557 | } |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3558 | |
Joerg Roedel | 4c5478c | 2008-12-03 14:58:24 +0100 | [diff] [blame] | 3559 | static void intel_iommu_detach_device(struct iommu_domain *domain, |
| 3560 | struct device *dev) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3561 | { |
Joerg Roedel | 4c5478c | 2008-12-03 14:58:24 +0100 | [diff] [blame] | 3562 | struct dmar_domain *dmar_domain = domain->priv; |
| 3563 | struct pci_dev *pdev = to_pci_dev(dev); |
| 3564 | |
Fenghua Yu | 2c2e2c3 | 2009-06-19 13:47:29 -0700 | [diff] [blame] | 3565 | domain_remove_one_dev_info(dmar_domain, pdev); |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3566 | } |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3567 | |
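| | /*
| |  * iommu_ops: map [iova, iova + size) to @hpa with the requested
| |  * protection, growing max_addr (and re-checking the minimum AGAW)
| |  * if the range extends beyond it.
| |  */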
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3568 | static int intel_iommu_map_range(struct iommu_domain *domain, |
| 3569 | unsigned long iova, phys_addr_t hpa, |
| 3570 | size_t size, int iommu_prot) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3571 | { |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3572 | struct dmar_domain *dmar_domain = domain->priv; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3573 | u64 max_addr; |
| 3574 | int addr_width; |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3575 | int prot = 0; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3576 | int ret; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3577 | |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3578 | if (iommu_prot & IOMMU_READ) |
| 3579 | prot |= DMA_PTE_READ; |
| 3580 | if (iommu_prot & IOMMU_WRITE) |
| 3581 | prot |= DMA_PTE_WRITE; |
Sheng Yang | 9cf06697 | 2009-03-18 15:33:07 +0800 | [diff] [blame] | 3582 | if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) |
| 3583 | prot |= DMA_PTE_SNP; |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3584 | |
David Woodhouse | 163cc52 | 2009-06-28 00:51:17 +0100 | [diff] [blame] | 3585 | max_addr = iova + size; |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3586 | if (dmar_domain->max_addr < max_addr) { |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3587 | int min_agaw; |
| 3588 | u64 end; |
| 3589 | |
| 3590 | /* check if minimum agaw is sufficient for mapped address */ |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3591 | min_agaw = vm_domain_min_agaw(dmar_domain); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3592 | addr_width = agaw_to_width(min_agaw); |
| 3593 | end = DOMAIN_MAX_ADDR(addr_width); |
| 3594 | end = end & VTD_PAGE_MASK; |
| 3595 | if (end < max_addr) { |
| 3596 | printk(KERN_ERR "%s: iommu agaw (%d) is not " |
| 3597 | "sufficient for the mapped address (%llx)\n", |
| 3598 | __func__, min_agaw, max_addr); |
| 3599 | return -EFAULT; |
| 3600 | } |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3601 | dmar_domain->max_addr = max_addr; |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3602 | } |
David Woodhouse | ad05122 | 2009-06-28 14:22:28 +0100 | [diff] [blame] | 3603 | /* Round up size to next multiple of PAGE_SIZE, if it and |
| 3604 | the low bits of hpa would take us onto the next page */ |
David Woodhouse | 88cb6a7 | 2009-06-28 15:03:06 +0100 | [diff] [blame] | 3605 | size = aligned_nrpages(hpa, size); |
David Woodhouse | ad05122 | 2009-06-28 14:22:28 +0100 | [diff] [blame] | 3606 | ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, |
| 3607 | hpa >> VTD_PAGE_SHIFT, size, prot); |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3608 | return ret; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3609 | } |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3610 | |
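| | /* iommu_ops: clear the PTEs covering [iova, iova + size). */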
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3611 | static void intel_iommu_unmap_range(struct iommu_domain *domain, |
| 3612 | unsigned long iova, size_t size) |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3613 | { |
Joerg Roedel | dde57a2 | 2008-12-03 15:04:09 +0100 | [diff] [blame] | 3614 | struct dmar_domain *dmar_domain = domain->priv; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3615 | |
Sheng Yang | 4b99d35 | 2009-07-08 11:52:52 +0100 | [diff] [blame] | 3616 | if (!size) |
| 3617 | return; |
| 3618 | |
David Woodhouse | 163cc52 | 2009-06-28 00:51:17 +0100 | [diff] [blame] | 3619 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
| 3620 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
Weidong Han | fe40f1e | 2008-12-08 23:10:23 +0800 | [diff] [blame] | 3621 | |
David Woodhouse | 163cc52 | 2009-06-28 00:51:17 +0100 | [diff] [blame] | 3622 | if (dmar_domain->max_addr == iova + size) |
| 3623 | dmar_domain->max_addr = iova; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3624 | } |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3625 | |
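| | /* iommu_ops: walk the page table and return the physical address behind @iova. */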
Joerg Roedel | d14d657 | 2008-12-03 15:06:57 +0100 | [diff] [blame] | 3626 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
| 3627 | unsigned long iova) |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3628 | { |
Joerg Roedel | d14d657 | 2008-12-03 15:06:57 +0100 | [diff] [blame] | 3629 | struct dmar_domain *dmar_domain = domain->priv; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3630 | struct dma_pte *pte; |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3631 | u64 phys = 0; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3632 | |
David Woodhouse | b026fd2 | 2009-06-28 10:37:25 +0100 | [diff] [blame] | 3633 | pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT); |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3634 | if (pte) |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3635 | phys = dma_pte_addr(pte); |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3636 | |
Weidong Han | faa3d6f | 2008-12-08 23:09:29 +0800 | [diff] [blame] | 3637 | return phys; |
Kay, Allen M | 3871794 | 2008-09-09 18:37:29 +0300 | [diff] [blame] | 3638 | } |
Joerg Roedel | a8bcbb0d | 2008-12-03 15:14:02 +0100 | [diff] [blame] | 3639 | |
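| | /* iommu_ops: report whether this domain can enforce cache coherency (snooping). */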
Sheng Yang | dbb9fd8 | 2009-03-18 15:33:06 +0800 | [diff] [blame] | 3640 | static int intel_iommu_domain_has_cap(struct iommu_domain *domain, |
| 3641 | unsigned long cap) |
| 3642 | { |
| 3643 | struct dmar_domain *dmar_domain = domain->priv; |
| 3644 | |
| 3645 | if (cap == IOMMU_CAP_CACHE_COHERENCY) |
| 3646 | return dmar_domain->iommu_snooping; |
| 3647 | |
| 3648 | return 0; |
| 3649 | } |
| 3650 | |
Joerg Roedel | a8bcbb0d | 2008-12-03 15:14:02 +0100 | [diff] [blame] | 3651 | static struct iommu_ops intel_iommu_ops = { |
| 3652 | .domain_init = intel_iommu_domain_init, |
| 3653 | .domain_destroy = intel_iommu_domain_destroy, |
| 3654 | .attach_dev = intel_iommu_attach_device, |
| 3655 | .detach_dev = intel_iommu_detach_device, |
| 3656 | .map = intel_iommu_map_range, |
| 3657 | .unmap = intel_iommu_unmap_range, |
| 3658 | .iova_to_phys = intel_iommu_iova_to_phys, |
Sheng Yang | dbb9fd8 | 2009-03-18 15:33:06 +0800 | [diff] [blame] | 3659 | .domain_has_cap = intel_iommu_domain_has_cap, |
Joerg Roedel | a8bcbb0d | 2008-12-03 15:14:02 +0100 | [diff] [blame] | 3660 | }; |
David Woodhouse | 9af8814 | 2009-02-13 23:18:03 +0000 | [diff] [blame] | 3661 | |
| 3662 | static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) |
| 3663 | { |
| 3664 | /* |
| 3665 | * Mobile 4 Series Chipset neglects to set RWBF capability, |
| 3666 | * but needs it: |
| 3667 | */ |
| 3668 | printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); |
| 3669 | rwbf_quirk = 1; |
| 3670 | } |
| 3671 | |
| 3672 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); |