/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

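/*
 * Illustrative note (example value is made up): the root table is indexed
 * by PCI bus number, and each present root entry points at a 4KB context
 * table that is indexed by devfn.  For a root entry with, say,
 * val == 0x3fe55001:
 *
 *	root_present(root)          -> true (bit 0 is set)
 *	root->val & VTD_PAGE_MASK   -> 0x3fe55000, physical base of the
 *	                               context table for that bus
 *	&context[devfn]             -> the 16-byte context entry for one
 *	                               device/function on that bus
 */
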
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

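/*
 * Sketch (for illustration only): domain_context_mapping_one() below
 * assembles a context entry roughly as
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, iommu->agaw);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 *
 * i.e. the low word carries the page-table root and translation type,
 * the high word carries the address width and the domain id.
 */
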
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

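/*
 * Example (illustrative, hypothetical address): a last-level PTE mapping
 * host physical page 0x12345000 read/write with snooping would hold
 *
 *	pte->val == 0x12345000 | DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 *
 * dma_pte_present() only tests bits 0-1, so an entry counts as present
 * as soon as it is readable and/or writable.
 */
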
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
int iommu_pass_through;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

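/*
 * Worked example (illustrative): with DEFAULT_DOMAIN_ADDRESS_WIDTH = 48,
 * width_to_agaw(48) = (48 - 30) / 9 = 2, i.e. a 4-level page table
 * (agaw_to_level(2) = 4).  If the hardware's SAGAW field does not have
 * bit 2 set, the loop above falls back to agaw 1 (39-bit, 3-level) and
 * then agaw 0 (30-bit, 2-level) before giving up with -1.
 */
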
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}

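/*
 * Illustrative example: for a 4-level table (agaw 2, 48-bit width) an
 * IOVA decomposes as
 *
 *	level 4: bits 47-39	(level_to_offset_bits(4) = 39)
 *	level 3: bits 38-30
 *	level 2: bits 29-21
 *	level 1: bits 20-12
 *	bits 11-0: offset within the 4KB page
 *
 * address_level_offset() extracts the 9-bit index for one level, and
 * level_size()/level_mask() give the amount of address space covered by
 * a single entry at that level (e.g. 2MB for a level-2 entry).
 */
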
static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 cmd, sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	cmd = iommu->gcmd | DMA_GCMD_SRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;
	val = iommu->gcmd | DMA_GCMD_WBF;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(val, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines whether we need a write buffer flush */
static int __iommu_flush_context(struct intel_iommu *iommu,
	u16 did, u16 source_id, u8 function_mask, u64 type,
	int non_present_entry_flush)
{
	u64 val = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * is used to cache any non-present entries)
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* flush context entry will implicitly flush write buffer */
	return 0;
}

/* return value determines whether we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int size_order, u64 type,
	int non_present_entry_flush)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	/*
	 * In the non-present entry flush case, if hardware doesn't cache
	 * non-present entries we do nothing; if hardware does cache
	 * non-present entries, we flush entries of domain 0 (the domain id
	 * is used to cache any non-present entries)
	 */
	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be super secure. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
	/* flush iotlb entry will implicitly flush write buffer */
	return 0;
}

static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
	u64 addr, unsigned int pages, int non_present_entry_flush)
{
	unsigned int mask;

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/* Fallback to domain selective flush if no PSI support */
	if (!cap_pgsel_inv(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH,
						non_present_entry_flush);

	/*
	 * PSI requires the number of pages to be 2 ^ x, and the base
	 * address to be naturally aligned to that size
	 */
	mask = ilog2(__roundup_pow_of_two(pages));
	/* Fallback to domain selective flush if size is too big */
	if (mask > cap_max_amask_val(iommu->cap))
		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
					DMA_TLB_PSI_FLUSH,
					non_present_entry_flush);
}

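/*
 * Worked example (illustrative): flushing pages = 5 gives
 * __roundup_pow_of_two(5) = 8 and mask = ilog2(8) = 3, i.e. a
 * page-selective invalidation covering 2^3 = 8 contiguous pages
 * starting at addr.  If mask exceeds the hardware's maximum address
 * mask value (cap_max_amask_val), the code falls back to a
 * domain-selective flush instead.
 */
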
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_TES), sts);

	iommu->gcmd |= DMA_GCMD_TE;
	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
{
	unsigned long num;
	unsigned long ndomains;
	struct dmar_domain *domain;
	unsigned long flags;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_domain_mem(domain);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return NULL;
	}

	set_bit(num, iommu->domain_ids);
	domain->id = num;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	domain->flags = 0;
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return domain;
}

static void iommu_free_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct intel_iommu *iommu;

	iommu = domain_get_iommu(domain);

	spin_lock_irqsave(&iommu->lock, flags);
	clear_bit(domain->id, iommu->domain_ids);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}

}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

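/*
 * Worked example (illustrative): a guest address width of 40 bits gives
 * r = (40 - 12) % 9 = 1, so the width is rounded up to 40 + 9 - 1 = 48,
 * the next width that maps onto a whole number of 9-bit page-table
 * levels above the 12-bit page offset.  Widths such as 30, 39, 48 and
 * 57 are already aligned (r == 0) and are returned unchanged.
 */
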
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_domain(domain);
	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommus which have a smaller agaw than the default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (likely(translation == CONTEXT_TT_MULTI_LEVEL)) {
		context_set_address_width(context, iommu->agaw);
		context_set_address_root(context, virt_to_phys(pgd));
	} else
		context_set_address_width(context, iommu->msagaw);

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/* it's a non-present to present mapping */
	if (iommu->flush.flush_context(iommu, domain->id,
		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
		DMA_CCMD_DEVICE_INVL, 1))
		iommu_flush_write_buffer(iommu);
	else
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
			int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
						  pci_domain_nr(tmp->bus),
						  tmp->bus->number,
						  tmp->devfn,
						  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

1527static int
1528domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1529 u64 hpa, size_t size, int prot)
1530{
1531 u64 start_pfn, end_pfn;
1532 struct dma_pte *pte;
1533 int index;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001534 int addr_width = agaw_to_width(domain->agaw);
1535
1536 hpa &= (((u64)1) << addr_width) - 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001537
1538 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1539 return -EINVAL;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001540 iova &= PAGE_MASK;
1541 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1542 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001543 index = 0;
1544 while (start_pfn < end_pfn) {
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001545 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546 if (!pte)
1547 return -ENOMEM;
1548 /* We don't need lock here, nobody else
1549 * touches the iova range
1550 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001551 BUG_ON(dma_pte_addr(pte));
1552 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1553 dma_set_pte_prot(pte, prot);
Sheng Yang9cf06692009-03-18 15:33:07 +08001554 if (prot & DMA_PTE_SNP)
1555 dma_set_pte_snp(pte);
Weidong Han5331fe62008-12-08 23:00:00 +08001556 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001557 start_pfn++;
1558 index++;
1559 }
1560 return 0;
1561}
1562
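/*
 * Clear the context entry for (@bus, @devfn) on @iommu and invalidate the
 * context cache and IOTLB globally so stale translations are dropped.
 */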
Weidong Hanc7151a82008-12-08 22:51:37 +08001563static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001564{
Weidong Hanc7151a82008-12-08 22:51:37 +08001565 if (!iommu)
1566 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001567
1568 clear_context_table(iommu, bus, devfn);
1569 iommu->flush.flush_context(iommu, 0, 0, 0,
Youquan Songa77b67d2008-10-16 16:31:56 -07001570 DMA_CCMD_GLOBAL_INVL, 0);
Weidong Han8c11e792008-12-08 15:29:22 +08001571 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Youquan Songa77b67d2008-10-16 16:31:56 -07001572 DMA_TLB_GLOBAL_FLUSH, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001573}
1574
1575static void domain_remove_dev_info(struct dmar_domain *domain)
1576{
1577 struct device_domain_info *info;
1578 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001579 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001580
1581 spin_lock_irqsave(&device_domain_lock, flags);
1582 while (!list_empty(&domain->devices)) {
1583 info = list_entry(domain->devices.next,
1584 struct device_domain_info, link);
1585 list_del(&info->link);
1586 list_del(&info->global);
1587 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001588 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001589 spin_unlock_irqrestore(&device_domain_lock, flags);
1590
David Woodhouse276dbf992009-04-04 01:45:37 +01001591 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001592 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593 free_devinfo_mem(info);
1594
1595 spin_lock_irqsave(&device_domain_lock, flags);
1596 }
1597 spin_unlock_irqrestore(&device_domain_lock, flags);
1598}
1599
1600/*
1601 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001602 * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001603 */
Kay, Allen M38717942008-09-09 18:37:29 +03001604static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001605find_domain(struct pci_dev *pdev)
1606{
1607 struct device_domain_info *info;
1608
1609 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001610 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001611 if (info)
1612 return info->domain;
1613 return NULL;
1614}
1615
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001616/* find an existing domain for the device, or allocate and initialize a new one */
1617static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1618{
1619 struct dmar_domain *domain, *found = NULL;
1620 struct intel_iommu *iommu;
1621 struct dmar_drhd_unit *drhd;
1622 struct device_domain_info *info, *tmp;
1623 struct pci_dev *dev_tmp;
1624 unsigned long flags;
1625 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001626 int segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001627
1628 domain = find_domain(pdev);
1629 if (domain)
1630 return domain;
1631
David Woodhouse276dbf992009-04-04 01:45:37 +01001632 segment = pci_domain_nr(pdev->bus);
1633
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001634 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1635 if (dev_tmp) {
1636 if (dev_tmp->is_pcie) {
1637 bus = dev_tmp->subordinate->number;
1638 devfn = 0;
1639 } else {
1640 bus = dev_tmp->bus->number;
1641 devfn = dev_tmp->devfn;
1642 }
1643 spin_lock_irqsave(&device_domain_lock, flags);
1644 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001645 if (info->segment == segment &&
1646 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001647 found = info->domain;
1648 break;
1649 }
1650 }
1651 spin_unlock_irqrestore(&device_domain_lock, flags);
1652		/* the pcie-pci bridge already has a domain; use it */
1653 if (found) {
1654 domain = found;
1655 goto found_domain;
1656 }
1657 }
1658
1659 /* Allocate new domain for the device */
1660 drhd = dmar_find_matched_drhd_unit(pdev);
1661 if (!drhd) {
1662 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1663 pci_name(pdev));
1664 return NULL;
1665 }
1666 iommu = drhd->iommu;
1667
1668 domain = iommu_alloc_domain(iommu);
1669 if (!domain)
1670 goto error;
1671
1672 if (domain_init(domain, gaw)) {
1673 domain_exit(domain);
1674 goto error;
1675 }
1676
1677 /* register pcie-to-pci device */
1678 if (dev_tmp) {
1679 info = alloc_devinfo_mem();
1680 if (!info) {
1681 domain_exit(domain);
1682 goto error;
1683 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001684 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001685 info->bus = bus;
1686 info->devfn = devfn;
1687 info->dev = NULL;
1688 info->domain = domain;
1689		/* This domain is shared by devices under the p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001690 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001691
1692		/* the pcie-to-pci bridge already has a domain; use it */
1693 found = NULL;
1694 spin_lock_irqsave(&device_domain_lock, flags);
1695 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001696 if (tmp->segment == segment &&
1697 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001698 found = tmp->domain;
1699 break;
1700 }
1701 }
1702 if (found) {
1703 free_devinfo_mem(info);
1704 domain_exit(domain);
1705 domain = found;
1706 } else {
1707 list_add(&info->link, &domain->devices);
1708 list_add(&info->global, &device_domain_list);
1709 }
1710 spin_unlock_irqrestore(&device_domain_lock, flags);
1711 }
1712
1713found_domain:
1714 info = alloc_devinfo_mem();
1715 if (!info)
1716 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001717 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001718 info->bus = pdev->bus->number;
1719 info->devfn = pdev->devfn;
1720 info->dev = pdev;
1721 info->domain = domain;
1722 spin_lock_irqsave(&device_domain_lock, flags);
1723 /* somebody is fast */
1724 found = find_domain(pdev);
1725 if (found != NULL) {
1726 spin_unlock_irqrestore(&device_domain_lock, flags);
1727 if (found != domain) {
1728 domain_exit(domain);
1729 domain = found;
1730 }
1731 free_devinfo_mem(info);
1732 return domain;
1733 }
1734 list_add(&info->link, &domain->devices);
1735 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001736 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001737 spin_unlock_irqrestore(&device_domain_lock, flags);
1738 return domain;
1739error:
1740 /* recheck it here, maybe others set it */
1741 return find_domain(pdev);
1742}
1743
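/*
 * Build a 1:1 mapping of [start, end) for @pdev: reserve the iova range,
 * clear any stale PTEs, map the range read/write and finally install the
 * device's context entry.
 */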
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001744static int iommu_prepare_identity_map(struct pci_dev *pdev,
1745 unsigned long long start,
1746 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001747{
1748 struct dmar_domain *domain;
1749 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001750 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001751 int ret;
1752
1753 printk(KERN_INFO
1754 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1755 pci_name(pdev), start, end);
1756 /* page table init */
1757 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1758 if (!domain)
1759 return -ENOMEM;
1760
1761 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001762 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001763 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001764 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001765 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1766 IOVA_PFN(base + size) - 1)) {
1767 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1768 ret = -ENOMEM;
1769 goto error;
1770 }
1771
1772 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1773 size, base, pci_name(pdev));
1774 /*
1775	 * The RMRR range might overlap the physical memory range;
1776	 * clear it first
1777 */
1778 dma_pte_clear_range(domain, base, base + size);
1779
1780 ret = domain_page_mapping(domain, base, base, size,
1781 DMA_PTE_READ|DMA_PTE_WRITE);
1782 if (ret)
1783 goto error;
1784
1785 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001786 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787 if (!ret)
1788 return 0;
1789error:
1790 domain_exit(domain);
1791 return ret;
1792
1793}
1794
1795static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1796 struct pci_dev *pdev)
1797{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001798 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799 return 0;
1800 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1801 rmrr->end_address + 1);
1802}
1803
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001804#ifdef CONFIG_DMAR_GFX_WA
Yinghai Lud52d53b2008-06-16 20:10:55 -07001805struct iommu_prepare_data {
1806 struct pci_dev *pdev;
1807 int ret;
1808};
1809
1810static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1811 unsigned long end_pfn, void *datax)
1812{
1813 struct iommu_prepare_data *data;
1814
1815 data = (struct iommu_prepare_data *)datax;
1816
1817 data->ret = iommu_prepare_identity_map(data->pdev,
1818 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1819 return data->ret;
1820
1821}
1822
1823static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1824{
1825 int nid;
1826 struct iommu_prepare_data data;
1827
1828 data.pdev = pdev;
1829 data.ret = 0;
1830
1831 for_each_online_node(nid) {
1832 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1833 if (data.ret)
1834 return data.ret;
1835 }
1836 return data.ret;
1837}
1838
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001839static void __init iommu_prepare_gfx_mapping(void)
1840{
1841 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001842 int ret;
1843
1844 for_each_pci_dev(pdev) {
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001845 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001846 !IS_GFX_DEVICE(pdev))
1847 continue;
1848 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1849 pci_name(pdev));
Yinghai Lud52d53b2008-06-16 20:10:55 -07001850 ret = iommu_prepare_with_active_regions(pdev);
1851 if (ret)
1852 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001853 }
1854}
Mark McLoughlin2abd7e12008-11-20 15:49:50 +00001855#else /* !CONFIG_DMAR_GFX_WA */
1856static inline void iommu_prepare_gfx_mapping(void)
1857{
1858 return;
1859}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001860#endif
1861
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001862#ifdef CONFIG_DMAR_FLOPPY_WA
1863static inline void iommu_prepare_isa(void)
1864{
1865 struct pci_dev *pdev;
1866 int ret;
1867
1868 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1869 if (!pdev)
1870 return;
1871
1872 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1873 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1874
1875 if (ret)
Frank Seidel1c35b8e2009-02-06 10:23:36 +01001876		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001877 "floppy might not work\n");
1878
1879}
1880#else
1881static inline void iommu_prepare_isa(void)
1882{
1883 return;
1884}
1885#endif /* !CONFIG_DMAR_FLOPPY_WA */
1886
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001887/* Initialize each context entry as pass through. */
1888static int __init init_context_pass_through(void)
1889{
1890 struct pci_dev *pdev = NULL;
1891 struct dmar_domain *domain;
1892 int ret;
1893
1894 for_each_pci_dev(pdev) {
1895 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1896 ret = domain_context_mapping(domain, pdev,
1897 CONTEXT_TT_PASS_THROUGH);
1898 if (ret)
1899 return ret;
1900 }
1901 return 0;
1902}
1903
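/*
 * Main DMA-remapping bring-up: allocate the per-iommu root/context tables,
 * choose queued vs. register-based invalidation, set up either pass-through
 * context entries or the RMRR/gfx/ISA identity maps, then enable fault
 * reporting and translation on every active DRHD unit.
 */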
Mark McLoughlin519a0542008-11-20 14:21:13 +00001904static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001905{
1906 struct dmar_drhd_unit *drhd;
1907 struct dmar_rmrr_unit *rmrr;
1908 struct pci_dev *pdev;
1909 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001910 int i, ret;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001911 int pass_through = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001912
1913 /*
1914 * for each drhd
1915 * allocate root
1916 * initialize and program root entry to not present
1917 * endfor
1918 */
1919 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08001920 g_num_of_iommus++;
1921 /*
1922		 * lock not needed as this is only incremented in the single-
1923		 * threaded kernel __init code path; all other accesses are
1924		 * read only
1925 */
1926 }
1927
Weidong Hand9630fe2008-12-08 11:06:32 +08001928 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1929 GFP_KERNEL);
1930 if (!g_iommus) {
1931 printk(KERN_ERR "Allocating global iommu array failed\n");
1932 ret = -ENOMEM;
1933 goto error;
1934 }
1935
mark gross80b20dd2008-04-18 13:53:58 -07001936 deferred_flush = kzalloc(g_num_of_iommus *
1937 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1938 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08001939 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08001940 ret = -ENOMEM;
1941 goto error;
1942 }
1943
mark gross5e0d2a62008-03-04 15:22:08 -08001944 for_each_drhd_unit(drhd) {
1945 if (drhd->ignored)
1946 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001947
1948 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08001949 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001950
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001951 ret = iommu_init_domains(iommu);
1952 if (ret)
1953 goto error;
1954
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001955 /*
1956 * TBD:
1957 * we could share the same root & context tables
1958		 * among all IOMMUs. Need to split it later.
1959 */
1960 ret = iommu_alloc_root_entry(iommu);
1961 if (ret) {
1962 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1963 goto error;
1964 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001965 if (!ecap_pass_through(iommu->ecap))
1966 pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001967 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001968 if (iommu_pass_through)
1969 if (!pass_through) {
1970 printk(KERN_INFO
1971 "Pass Through is not supported by hardware.\n");
1972 iommu_pass_through = 0;
1973 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001974
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001975 /*
1976	 * Start from a sane iommu hardware state.
1977 */
Youquan Songa77b67d2008-10-16 16:31:56 -07001978 for_each_drhd_unit(drhd) {
1979 if (drhd->ignored)
1980 continue;
1981
1982 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001983
1984 /*
1985 * If the queued invalidation is already initialized by us
1986 * (for example, while enabling interrupt-remapping) then
1987 * we got the things already rolling from a sane state.
1988 */
1989 if (iommu->qi)
1990 continue;
1991
1992 /*
1993 * Clear any previous faults.
1994 */
1995 dmar_fault(-1, iommu);
1996 /*
1997 * Disable queued invalidation if supported and already enabled
1998 * before OS handover.
1999 */
2000 dmar_disable_qi(iommu);
2001 }
2002
2003 for_each_drhd_unit(drhd) {
2004 if (drhd->ignored)
2005 continue;
2006
2007 iommu = drhd->iommu;
2008
Youquan Songa77b67d2008-10-16 16:31:56 -07002009 if (dmar_enable_qi(iommu)) {
2010 /*
2011 * Queued Invalidate not enabled, use Register Based
2012 * Invalidate
2013 */
2014 iommu->flush.flush_context = __iommu_flush_context;
2015 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2016 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002017 "invalidation\n",
2018 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002019 } else {
2020 iommu->flush.flush_context = qi_flush_context;
2021 iommu->flush.flush_iotlb = qi_flush_iotlb;
2022 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002023 "invalidation\n",
2024 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002025 }
2026 }
2027
Han, Weidongd0b03bd2009-04-03 17:15:50 +08002028#ifdef CONFIG_INTR_REMAP
2029 if (!intr_remapping_enabled) {
2030 ret = enable_intr_remapping(0);
2031 if (ret)
2032 printk(KERN_ERR
2033 "IOMMU: enable interrupt remapping failed\n");
2034 }
2035#endif
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002036 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002037	 * If pass through is set and enabled, the context entries of all pci
2038	 * devices are initialized with the pass through translation type.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002039 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002040 if (iommu_pass_through) {
2041 ret = init_context_pass_through();
2042 if (ret) {
2043 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2044 iommu_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002045 }
2046 }
2047
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002048 /*
2049	 * If pass through is not set or not enabled, set up context entries for
2050 * identity mappings for rmrr, gfx, and isa.
2051 */
2052 if (!iommu_pass_through) {
2053 /*
2054 * For each rmrr
2055 * for each dev attached to rmrr
2056 * do
2057 * locate drhd for dev, alloc domain for dev
2058 * allocate free domain
2059 * allocate page table entries for rmrr
2060 * if context not allocated for bus
2061 * allocate and init context
2062 * set present in root table for this bus
2063 * init context with domain, translation etc
2064 * endfor
2065 * endfor
2066 */
2067 for_each_rmrr_units(rmrr) {
2068 for (i = 0; i < rmrr->devices_cnt; i++) {
2069 pdev = rmrr->devices[i];
2070 /*
2071				 * some BIOSes list nonexistent devices in the
2072				 * DMAR table.
2073 */
2074 if (!pdev)
2075 continue;
2076 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2077 if (ret)
2078 printk(KERN_ERR
2079 "IOMMU: mapping reserved region failed\n");
2080 }
2081 }
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002082
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002083 iommu_prepare_gfx_mapping();
2084
2085 iommu_prepare_isa();
2086 }
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002087
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002088 /*
2089 * for each drhd
2090 * enable fault log
2091 * global invalidate context cache
2092 * global invalidate iotlb
2093 * enable translation
2094 */
2095 for_each_drhd_unit(drhd) {
2096 if (drhd->ignored)
2097 continue;
2098 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002099
2100 iommu_flush_write_buffer(iommu);
2101
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002102 ret = dmar_set_interrupt(iommu);
2103 if (ret)
2104 goto error;
2105
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002106 iommu_set_root_entry(iommu);
2107
Youquan Songa77b67d2008-10-16 16:31:56 -07002108 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
2109 0);
2110 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2111 0);
mark grossf8bab732008-02-08 04:18:38 -08002112 iommu_disable_protect_mem_regions(iommu);
2113
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002114 ret = iommu_enable_translation(iommu);
2115 if (ret)
2116 goto error;
2117 }
2118
2119 return 0;
2120error:
2121 for_each_drhd_unit(drhd) {
2122 if (drhd->ignored)
2123 continue;
2124 iommu = drhd->iommu;
2125 free_iommu(iommu);
2126 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002127 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002128 return ret;
2129}
2130
2131static inline u64 aligned_size(u64 host_addr, size_t size)
2132{
2133 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002134 addr = (host_addr & (~PAGE_MASK)) + size;
2135 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002136}
2137
2138struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002139iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002140{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002141 struct iova *piova;
2142
2143 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002144 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002145 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002146 return NULL;
2147
2148 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002149 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002150 return piova;
2151}
2152
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002153static struct iova *
2154__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002155 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002156{
2157 struct pci_dev *pdev = to_pci_dev(dev);
2158 struct iova *iova = NULL;
2159
Yang Hongyang284901a2009-04-06 19:01:15 -07002160 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002161 iova = iommu_alloc_iova(domain, size, dma_mask);
2162 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002163 /*
2164		 * First try to allocate an I/O virtual address below
Yang Hongyang284901a2009-04-06 19:01:15 -07002165		 * DMA_BIT_MASK(32); if that fails, then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002166		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002167 */
Yang Hongyang284901a2009-04-06 19:01:15 -07002168 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002169 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002170 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002171 }
2172
2173 if (!iova) {
2174 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
2175 return NULL;
2176 }
2177
2178 return iova;
2179}
2180
2181static struct dmar_domain *
2182get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002183{
2184 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002185 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002186
2187 domain = get_domain_for_dev(pdev,
2188 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2189 if (!domain) {
2190 printk(KERN_ERR
2191 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002192 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002193 }
2194
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002195 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002196 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002197 ret = domain_context_mapping(domain, pdev,
2198 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002199 if (ret) {
2200 printk(KERN_ERR
2201 "Domain context map for %s failed",
2202 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002203 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002204 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002205 }
2206
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002207 return domain;
2208}
2209
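/*
 * Common map routine behind the DMA API entry points: look up (or create)
 * the device's domain, allocate an iova that fits under @dma_mask, map the
 * page-aligned physical range into it and flush the IOTLB for the new
 * entry.  Returns the resulting bus address, or 0 on failure.
 */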
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002210static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2211 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002212{
2213 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002214 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002215 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002216 struct iova *iova;
2217 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002218 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002219 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002220
2221 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002222 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002223 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002224
2225 domain = get_valid_domain_for_dev(pdev);
2226 if (!domain)
2227 return 0;
2228
Weidong Han8c11e792008-12-08 15:29:22 +08002229 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002230 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002231
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002232 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002233 if (!iova)
2234 goto error;
2235
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002236 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002237
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002238 /*
2239 * Check if DMAR supports zero-length reads on write only
2240	 * mappings.
2241 */
2242 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002243 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002244 prot |= DMA_PTE_READ;
2245 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2246 prot |= DMA_PTE_WRITE;
2247 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002248	 * paddr to (paddr + size) might cover only part of a page, so map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002249	 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002250	 * might have two guest addresses mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002251 * is not a big problem
2252 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002253 ret = domain_page_mapping(domain, start_paddr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002254 ((u64)paddr) & PAGE_MASK, size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002255 if (ret)
2256 goto error;
2257
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002258 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002259 ret = iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002260 start_paddr, size >> VTD_PAGE_SHIFT, 1);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002261 if (ret)
Weidong Han8c11e792008-12-08 15:29:22 +08002262 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002263
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002264 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002265
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002266error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002267 if (iova)
2268 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002269 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002270 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002271 return 0;
2272}
2273
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002274static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2275 unsigned long offset, size_t size,
2276 enum dma_data_direction dir,
2277 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002278{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002279 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2280 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002281}
2282
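/*
 * Deferred unmapping: freed iovas are queued per iommu in deferred_flush[]
 * and only returned to the allocator after a global IOTLB flush, triggered
 * either by the 10ms unmap_timer or by add_unmap() hitting HIGH_WATER_MARK.
 * This batches the expensive flush operations.
 */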
mark gross5e0d2a62008-03-04 15:22:08 -08002283static void flush_unmaps(void)
2284{
mark gross80b20dd2008-04-18 13:53:58 -07002285 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002286
mark gross5e0d2a62008-03-04 15:22:08 -08002287 timer_on = 0;
2288
2289 /* just flush them all */
2290 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002291 struct intel_iommu *iommu = g_iommus[i];
2292 if (!iommu)
2293 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002294
Weidong Hana2bb8452008-12-08 11:24:12 +08002295 if (deferred_flush[i].next) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002296 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2297 DMA_TLB_GLOBAL_FLUSH, 0);
mark gross80b20dd2008-04-18 13:53:58 -07002298 for (j = 0; j < deferred_flush[i].next; j++) {
2299 __free_iova(&deferred_flush[i].domain[j]->iovad,
2300 deferred_flush[i].iova[j]);
2301 }
2302 deferred_flush[i].next = 0;
2303 }
mark gross5e0d2a62008-03-04 15:22:08 -08002304 }
2305
mark gross5e0d2a62008-03-04 15:22:08 -08002306 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002307}
2308
2309static void flush_unmaps_timeout(unsigned long data)
2310{
mark gross80b20dd2008-04-18 13:53:58 -07002311 unsigned long flags;
2312
2313 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002314 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002315 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002316}
2317
2318static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2319{
2320 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002321 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002322 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002323
2324 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002325 if (list_size == HIGH_WATER_MARK)
2326 flush_unmaps();
2327
Weidong Han8c11e792008-12-08 15:29:22 +08002328 iommu = domain_get_iommu(dom);
2329 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002330
mark gross80b20dd2008-04-18 13:53:58 -07002331 next = deferred_flush[iommu_id].next;
2332 deferred_flush[iommu_id].domain[next] = dom;
2333 deferred_flush[iommu_id].iova[next] = iova;
2334 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002335
2336 if (!timer_on) {
2337 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2338 timer_on = 1;
2339 }
2340 list_size++;
2341 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2342}
2343
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002344static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2345 size_t size, enum dma_data_direction dir,
2346 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002347{
2348 struct pci_dev *pdev = to_pci_dev(dev);
2349 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002350 unsigned long start_addr;
2351 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002352 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002353
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002354 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002355 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002356 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002357 BUG_ON(!domain);
2358
Weidong Han8c11e792008-12-08 15:29:22 +08002359 iommu = domain_get_iommu(domain);
2360
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002361 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2362 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002363 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002364
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002365 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002366 size = aligned_size((u64)dev_addr, size);
2367
David Woodhouse4cf2e752009-02-11 17:23:43 +00002368 pr_debug("Device %s unmapping: %zx@%llx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002369 pci_name(pdev), size, (unsigned long long)start_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002370
2371	/* clear the whole mapped range */
2372 dma_pte_clear_range(domain, start_addr, start_addr + size);
2373 /* free page tables */
2374 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
mark gross5e0d2a62008-03-04 15:22:08 -08002375 if (intel_iommu_strict) {
Weidong Han8c11e792008-12-08 15:29:22 +08002376 if (iommu_flush_iotlb_psi(iommu,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002377 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002378 iommu_flush_write_buffer(iommu);
mark gross5e0d2a62008-03-04 15:22:08 -08002379 /* free iova */
2380 __free_iova(&domain->iovad, iova);
2381 } else {
2382 add_unmap(domain, iova);
2383 /*
2384		 * queue up the release of the unmap to save roughly 1/6th of the
2385		 * cpu time used up by the iotlb flush operation...
2386 */
mark gross5e0d2a62008-03-04 15:22:08 -08002387 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002388}
2389
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002390static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2391 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002392{
2393 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2394}
2395
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002396static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2397 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002398{
2399 void *vaddr;
2400 int order;
2401
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002402 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002403 order = get_order(size);
2404 flags &= ~(GFP_DMA | GFP_DMA32);
2405
2406 vaddr = (void *)__get_free_pages(flags, order);
2407 if (!vaddr)
2408 return NULL;
2409 memset(vaddr, 0, size);
2410
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002411 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2412 DMA_BIDIRECTIONAL,
2413 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002414 if (*dma_handle)
2415 return vaddr;
2416 free_pages((unsigned long)vaddr, order);
2417 return NULL;
2418}
2419
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002420static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2421 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002422{
2423 int order;
2424
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002425 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002426 order = get_order(size);
2427
2428 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2429 free_pages((unsigned long)vaddr, order);
2430}
2431
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002432static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2433 int nelems, enum dma_data_direction dir,
2434 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002435{
2436 int i;
2437 struct pci_dev *pdev = to_pci_dev(hwdev);
2438 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002439 unsigned long start_addr;
2440 struct iova *iova;
2441 size_t size = 0;
David Woodhouse4cf2e752009-02-11 17:23:43 +00002442 phys_addr_t addr;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002443 struct scatterlist *sg;
Weidong Han8c11e792008-12-08 15:29:22 +08002444 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002445
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002446 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002447 return;
2448
2449 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002450 BUG_ON(!domain);
2451
2452 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002453
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002454 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002455 if (!iova)
2456 return;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002457 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002458 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002459 size += aligned_size((u64)addr, sg->length);
2460 }
2461
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002462 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002463
2464	/* clear the whole mapped range */
2465 dma_pte_clear_range(domain, start_addr, start_addr + size);
2466 /* free page tables */
2467 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2468
Weidong Han8c11e792008-12-08 15:29:22 +08002469 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002470 size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002471 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002472
2473 /* free iova */
2474 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002475}
2476
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002477static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002478 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002479{
2480 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002481 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002482
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002483 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002484 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002485 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002486 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002487 }
2488 return nelems;
2489}
2490
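/*
 * Scatterlist mapping: allocate a single iova range large enough for all
 * segments, map each segment into it back to back, and store the resulting
 * bus addresses back into the scatterlist entries.
 */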
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002491static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2492 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002493{
David Woodhouse4cf2e752009-02-11 17:23:43 +00002494 phys_addr_t addr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002495 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002496 struct pci_dev *pdev = to_pci_dev(hwdev);
2497 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002498 size_t size = 0;
2499 int prot = 0;
2500 size_t offset = 0;
2501 struct iova *iova = NULL;
2502 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002503 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002504 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002505 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002506
2507 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002508 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002509 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002510
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002511 domain = get_valid_domain_for_dev(pdev);
2512 if (!domain)
2513 return 0;
2514
Weidong Han8c11e792008-12-08 15:29:22 +08002515 iommu = domain_get_iommu(domain);
2516
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002517 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002518 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002519 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002520 }
2521
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002522 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002523 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002524 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002525 return 0;
2526 }
2527
2528 /*
2529 * Check if DMAR supports zero-length reads on write only
2530	 * mappings.
2531 */
2532 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002533 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002534 prot |= DMA_PTE_READ;
2535 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2536 prot |= DMA_PTE_WRITE;
2537
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002538 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002539 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002540 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002541 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002542 size = aligned_size((u64)addr, sg->length);
2543 ret = domain_page_mapping(domain, start_addr + offset,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002544 ((u64)addr) & PAGE_MASK,
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002545 size, prot);
2546 if (ret) {
2547 /* clear the page */
2548 dma_pte_clear_range(domain, start_addr,
2549 start_addr + offset);
2550 /* free page tables */
2551 dma_pte_free_pagetable(domain, start_addr,
2552 start_addr + offset);
2553 /* free iova */
2554 __free_iova(&domain->iovad, iova);
2555 return 0;
2556 }
2557 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002558 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002559 sg->dma_length = sg->length;
2560 offset += size;
2561 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002562
2563 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002564 if (iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002565 start_addr, offset >> VTD_PAGE_SHIFT, 1))
Weidong Han8c11e792008-12-08 15:29:22 +08002566 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002567 return nelems;
2568}
2569
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002570static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2571{
2572 return !dma_addr;
2573}
2574
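/*
 * These ops back the generic DMA API once intel_iommu_init() points dma_ops
 * here; e.g. a driver call such as
 *
 *	dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up in intel_map_page()/__intel_map_single() above.
 */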
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002575struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002576 .alloc_coherent = intel_alloc_coherent,
2577 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002578 .map_sg = intel_map_sg,
2579 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002580 .map_page = intel_map_page,
2581 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002582 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002583};
2584
2585static inline int iommu_domain_cache_init(void)
2586{
2587 int ret = 0;
2588
2589 iommu_domain_cache = kmem_cache_create("iommu_domain",
2590 sizeof(struct dmar_domain),
2591 0,
2592 SLAB_HWCACHE_ALIGN,
2593
2594 NULL);
2595 if (!iommu_domain_cache) {
2596 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2597 ret = -ENOMEM;
2598 }
2599
2600 return ret;
2601}
2602
2603static inline int iommu_devinfo_cache_init(void)
2604{
2605 int ret = 0;
2606
2607 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2608 sizeof(struct device_domain_info),
2609 0,
2610 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002611 NULL);
2612 if (!iommu_devinfo_cache) {
2613 printk(KERN_ERR "Couldn't create devinfo cache\n");
2614 ret = -ENOMEM;
2615 }
2616
2617 return ret;
2618}
2619
2620static inline int iommu_iova_cache_init(void)
2621{
2622 int ret = 0;
2623
2624 iommu_iova_cache = kmem_cache_create("iommu_iova",
2625 sizeof(struct iova),
2626 0,
2627 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002628 NULL);
2629 if (!iommu_iova_cache) {
2630 printk(KERN_ERR "Couldn't create iova cache\n");
2631 ret = -ENOMEM;
2632 }
2633
2634 return ret;
2635}
2636
2637static int __init iommu_init_mempool(void)
2638{
2639 int ret;
2640 ret = iommu_iova_cache_init();
2641 if (ret)
2642 return ret;
2643
2644 ret = iommu_domain_cache_init();
2645 if (ret)
2646 goto domain_error;
2647
2648 ret = iommu_devinfo_cache_init();
2649 if (!ret)
2650 return ret;
2651
2652 kmem_cache_destroy(iommu_domain_cache);
2653domain_error:
2654 kmem_cache_destroy(iommu_iova_cache);
2655
2656 return -ENOMEM;
2657}
2658
2659static void __init iommu_exit_mempool(void)
2660{
2661 kmem_cache_destroy(iommu_devinfo_cache);
2662 kmem_cache_destroy(iommu_domain_cache);
2663 kmem_cache_destroy(iommu_iova_cache);
2664
2665}
2666
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002667static void __init init_no_remapping_devices(void)
2668{
2669 struct dmar_drhd_unit *drhd;
2670
2671 for_each_drhd_unit(drhd) {
2672 if (!drhd->include_all) {
2673 int i;
2674 for (i = 0; i < drhd->devices_cnt; i++)
2675 if (drhd->devices[i] != NULL)
2676 break;
2677 /* ignore DMAR unit if no pci devices exist */
2678 if (i == drhd->devices_cnt)
2679 drhd->ignored = 1;
2680 }
2681 }
2682
2683 if (dmar_map_gfx)
2684 return;
2685
2686 for_each_drhd_unit(drhd) {
2687 int i;
2688 if (drhd->ignored || drhd->include_all)
2689 continue;
2690
2691 for (i = 0; i < drhd->devices_cnt; i++)
2692 if (drhd->devices[i] &&
2693 !IS_GFX_DEVICE(drhd->devices[i]))
2694 break;
2695
2696 if (i < drhd->devices_cnt)
2697 continue;
2698
2699 /* bypass IOMMU if it is just for gfx devices */
2700 drhd->ignored = 1;
2701 for (i = 0; i < drhd->devices_cnt; i++) {
2702 if (!drhd->devices[i])
2703 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002704 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002705 }
2706 }
2707}
2708
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002709#ifdef CONFIG_SUSPEND
2710static int init_iommu_hw(void)
2711{
2712 struct dmar_drhd_unit *drhd;
2713 struct intel_iommu *iommu = NULL;
2714
2715 for_each_active_iommu(iommu, drhd)
2716 if (iommu->qi)
2717 dmar_reenable_qi(iommu);
2718
2719 for_each_active_iommu(iommu, drhd) {
2720 iommu_flush_write_buffer(iommu);
2721
2722 iommu_set_root_entry(iommu);
2723
2724 iommu->flush.flush_context(iommu, 0, 0, 0,
2725 DMA_CCMD_GLOBAL_INVL, 0);
2726 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2727 DMA_TLB_GLOBAL_FLUSH, 0);
2728 iommu_disable_protect_mem_regions(iommu);
2729 iommu_enable_translation(iommu);
2730 }
2731
2732 return 0;
2733}
2734
2735static void iommu_flush_all(void)
2736{
2737 struct dmar_drhd_unit *drhd;
2738 struct intel_iommu *iommu;
2739
2740 for_each_active_iommu(iommu, drhd) {
2741 iommu->flush.flush_context(iommu, 0, 0, 0,
2742 DMA_CCMD_GLOBAL_INVL, 0);
2743 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2744 DMA_TLB_GLOBAL_FLUSH, 0);
2745 }
2746}
2747
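/*
 * Suspend support: disable translation and save each iommu's fault-event
 * control/data/address registers; iommu_resume() restores them after
 * re-enabling the hardware through init_iommu_hw().
 */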
2748static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2749{
2750 struct dmar_drhd_unit *drhd;
2751 struct intel_iommu *iommu = NULL;
2752 unsigned long flag;
2753
2754 for_each_active_iommu(iommu, drhd) {
2755 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2756 GFP_ATOMIC);
2757 if (!iommu->iommu_state)
2758 goto nomem;
2759 }
2760
2761 iommu_flush_all();
2762
2763 for_each_active_iommu(iommu, drhd) {
2764 iommu_disable_translation(iommu);
2765
2766 spin_lock_irqsave(&iommu->register_lock, flag);
2767
2768 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2769 readl(iommu->reg + DMAR_FECTL_REG);
2770 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2771 readl(iommu->reg + DMAR_FEDATA_REG);
2772 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2773 readl(iommu->reg + DMAR_FEADDR_REG);
2774 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2775 readl(iommu->reg + DMAR_FEUADDR_REG);
2776
2777 spin_unlock_irqrestore(&iommu->register_lock, flag);
2778 }
2779 return 0;
2780
2781nomem:
2782 for_each_active_iommu(iommu, drhd)
2783 kfree(iommu->iommu_state);
2784
2785 return -ENOMEM;
2786}
2787
2788static int iommu_resume(struct sys_device *dev)
2789{
2790 struct dmar_drhd_unit *drhd;
2791 struct intel_iommu *iommu = NULL;
2792 unsigned long flag;
2793
2794 if (init_iommu_hw()) {
2795 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
2796 return -EIO;
2797 }
2798
2799 for_each_active_iommu(iommu, drhd) {
2800
2801 spin_lock_irqsave(&iommu->register_lock, flag);
2802
2803 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2804 iommu->reg + DMAR_FECTL_REG);
2805 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2806 iommu->reg + DMAR_FEDATA_REG);
2807 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2808 iommu->reg + DMAR_FEADDR_REG);
2809 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2810 iommu->reg + DMAR_FEUADDR_REG);
2811
2812 spin_unlock_irqrestore(&iommu->register_lock, flag);
2813 }
2814
2815 for_each_active_iommu(iommu, drhd)
2816 kfree(iommu->iommu_state);
2817
2818 return 0;
2819}
2820
2821static struct sysdev_class iommu_sysclass = {
2822 .name = "iommu",
2823 .resume = iommu_resume,
2824 .suspend = iommu_suspend,
2825};
2826
2827static struct sys_device device_iommu = {
2828 .cls = &iommu_sysclass,
2829};
2830
2831static int __init init_iommu_sysfs(void)
2832{
2833 int error;
2834
2835 error = sysdev_class_register(&iommu_sysclass);
2836 if (error)
2837 return error;
2838
2839 error = sysdev_register(&device_iommu);
2840 if (error)
2841 sysdev_class_unregister(&iommu_sysclass);
2842
2843 return error;
2844}
2845
2846#else
2847static int __init init_iommu_sysfs(void)
2848{
2849 return 0;
2850}
2851#endif /* CONFIG_SUSPEND */
2852
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002853int __init intel_iommu_init(void)
2854{
2855 int ret = 0;
2856
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002857 if (dmar_table_init())
2858 return -ENODEV;
2859
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002860 if (dmar_dev_scope_init())
2861 return -ENODEV;
2862
Suresh Siddha2ae21012008-07-10 11:16:43 -07002863 /*
2864 * Check the need for DMA-remapping initialization now.
2865	 * The above initialization will also be used by interrupt remapping.
2866 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002867 if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07002868 return -ENODEV;
2869
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002870 iommu_init_mempool();
2871 dmar_init_reserved_ranges();
2872
2873 init_no_remapping_devices();
2874
2875 ret = init_dmars();
2876 if (ret) {
2877 printk(KERN_ERR "IOMMU: dmar init failed\n");
2878 put_iova_domain(&reserved_iova_list);
2879 iommu_exit_mempool();
2880 return ret;
2881 }
2882 printk(KERN_INFO
2883 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2884
mark gross5e0d2a62008-03-04 15:22:08 -08002885 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002886 force_iommu = 1;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002887
2888 if (!iommu_pass_through) {
2889 printk(KERN_INFO
2890 "Multi-level page-table translation for DMAR.\n");
2891 dma_ops = &intel_dma_ops;
2892 } else
2893 printk(KERN_INFO
2894 "DMAR: Pass through translation for DMAR.\n");
2895
Fenghua Yuf59c7b62009-03-27 14:22:42 -07002896 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01002897
2898 register_iommu(&intel_iommu_ops);
2899
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002900 return 0;
2901}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002902
Weidong Hanc7151a82008-12-08 22:51:37 +08002903static int vm_domain_add_dev_info(struct dmar_domain *domain,
2904 struct pci_dev *pdev)
2905{
2906 struct device_domain_info *info;
2907 unsigned long flags;
2908
2909 info = alloc_devinfo_mem();
2910 if (!info)
2911 return -ENOMEM;
2912
David Woodhouse276dbf992009-04-04 01:45:37 +01002913 info->segment = pci_domain_nr(pdev->bus);
Weidong Hanc7151a82008-12-08 22:51:37 +08002914 info->bus = pdev->bus->number;
2915 info->devfn = pdev->devfn;
2916 info->dev = pdev;
2917 info->domain = domain;
2918
2919 spin_lock_irqsave(&device_domain_lock, flags);
2920 list_add(&info->link, &domain->devices);
2921 list_add(&info->global, &device_domain_list);
2922 pdev->dev.archdata.iommu = info;
2923 spin_unlock_irqrestore(&device_domain_lock, flags);
2924
2925 return 0;
2926}
2927
Han, Weidong3199aa62009-02-26 17:31:12 +08002928static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2929 struct pci_dev *pdev)
2930{
2931 struct pci_dev *tmp, *parent;
2932
2933 if (!iommu || !pdev)
2934 return;
2935
2936 /* dependent device detach */
2937 tmp = pci_find_upstream_pcie_bridge(pdev);
2938 /* Secondary interface's bus number and devfn 0 */
2939 if (tmp) {
2940 parent = pdev->bus->self;
2941 while (parent != tmp) {
2942 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01002943 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08002944 parent = parent->bus->self;
2945 }
2946 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
2947 iommu_detach_dev(iommu,
2948 tmp->subordinate->number, 0);
2949 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01002950 iommu_detach_dev(iommu, tmp->bus->number,
2951 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08002952 }
2953}
2954
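/*
 * Detach @pdev from @domain: drop its device_domain_info, tear down its
 * context entry (and those of any bridges it sits behind), and if no other
 * device of this domain remains on the same iommu, clear that iommu from
 * the domain's bitmap and update the iommu count and capabilities.
 */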
Weidong Hanc7151a82008-12-08 22:51:37 +08002955static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2956 struct pci_dev *pdev)
2957{
2958 struct device_domain_info *info;
2959 struct intel_iommu *iommu;
2960 unsigned long flags;
2961 int found = 0;
2962 struct list_head *entry, *tmp;
2963
David Woodhouse276dbf992009-04-04 01:45:37 +01002964 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
2965 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08002966 if (!iommu)
2967 return;
2968
2969 spin_lock_irqsave(&device_domain_lock, flags);
2970 list_for_each_safe(entry, tmp, &domain->devices) {
2971 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf992009-04-04 01:45:37 +01002972 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08002973 if (info->bus == pdev->bus->number &&
2974 info->devfn == pdev->devfn) {
2975 list_del(&info->link);
2976 list_del(&info->global);
2977 if (info->dev)
2978 info->dev->dev.archdata.iommu = NULL;
2979 spin_unlock_irqrestore(&device_domain_lock, flags);
2980
2981 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08002982 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08002983 free_devinfo_mem(info);
2984
2985 spin_lock_irqsave(&device_domain_lock, flags);
2986
2987 if (found)
2988 break;
2989 else
2990 continue;
2991 }
2992
2993	/* if there are no other devices under the same iommu
2994	 * owned by this domain, clear this iommu in iommu_bmp,
2995	 * update iommu count and coherency
2996 */
David Woodhouse276dbf992009-04-04 01:45:37 +01002997 if (iommu == device_to_iommu(info->segment, info->bus,
2998 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08002999 found = 1;
3000 }
3001
3002 if (found == 0) {
3003 unsigned long tmp_flags;
3004 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3005 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3006 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003007 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003008 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3009 }
3010
3011 spin_unlock_irqrestore(&device_domain_lock, flags);
3012}
3013
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machines; it is never written into a context entry */
static unsigned long vm_domid;

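/*
 * Return the smallest adjusted guest address width supported by all
 * IOMMUs that currently have devices in this domain; it bounds how
 * large an address the domain can safely map.
 */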
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

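/*
 * Allocate a domain for a virtual machine.  VM domains take their id
 * from the global vm_domid counter rather than from any one IOMMU's
 * domain_ids, and are marked with DOMAIN_FLAG_VIRTUAL_MACHINE.
 */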
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

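/*
 * Initialise a freshly allocated VM domain: set up its IOVA allocator
 * and locks, reserve the special ranges, derive gaw/agaw from the
 * requested guest width and allocate the top-level page directory.
 */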
static int vm_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

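/*
 * Release the per-IOMMU domain ids that were allocated for this VM
 * domain when devices were attached, scanning every DRHD unit.
 */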
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

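/*
 * The callbacks below implement the generic IOMMU API (linux/iommu.h)
 * on top of VT-d virtual-machine domains; they are wired up through the
 * intel_iommu_ops structure at the end of this file.
 */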
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

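/*
 * Attach a device to an IOMMU-API domain: tear down any previous
 * mapping of the device, check that the IOMMU behind it can address
 * everything already mapped in the domain, then program its context
 * entry and record it in the domain's device list.
 */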
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		return ret;

	ret = vm_domain_add_dev_info(dmar_domain, pdev);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	vm_domain_remove_one_dev_info(dmar_domain, pdev);
}

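/*
 * Map a contiguous range of host physical memory at the given IOVA.
 * The IOMMU-API protection flags are translated to VT-d PTE bits
 * (snooping only if the hardware supports it), and the mapping is
 * refused if the smallest AGAW among the attached IOMMUs cannot reach
 * the end of the range.
 */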
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}

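/*
 * Unmap a range by clearing its PTEs.  The range is rounded out to
 * page boundaries first; the page-table pages themselves are only
 * freed when the domain is destroyed.
 */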
static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);