blob: 49402c399232c2cd3d69c7050bc2cee0605cc59d [file] [log] [blame]
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Fenghua Yu5b6985c2008-10-16 18:02:32 -070021 * Author: Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070022 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080026#include <linux/debugfs.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070030#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
mark gross5e0d2a62008-03-04 15:22:08 -080035#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030036#include <linux/iova.h>
Joerg Roedel5d450802008-12-03 14:52:32 +010037#include <linux/iommu.h>
Kay, Allen M38717942008-09-09 18:37:29 +030038#include <linux/intel-iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070039#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090040#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070041#include "pci.h"
42
Fenghua Yu5b6985c2008-10-16 18:02:32 -070043#define ROOT_SIZE VTD_PAGE_SIZE
44#define CONTEXT_SIZE VTD_PAGE_SIZE
45
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070046#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
47#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
48
49#define IOAPIC_RANGE_START (0xfee00000)
50#define IOAPIC_RANGE_END (0xfeefffff)
51#define IOVA_START_ADDR (0x1000)
52
53#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
54
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070055#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
56
Mark McLoughlinf27be032008-11-20 15:49:43 +000057#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
58#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
59#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
mark gross5e0d2a62008-03-04 15:22:08 -080060
Weidong Hand9630fe2008-12-08 11:06:32 +080061/* global iommu list, set NULL for ignored DMAR units */
62static struct intel_iommu **g_iommus;
63
David Woodhouse9af88142009-02-13 23:18:03 +000064static int rwbf_quirk;
65
Mark McLoughlin46b08e12008-11-20 15:49:44 +000066/*
67 * 0: Present
68 * 1-11: Reserved
69 * 12-63: Context Ptr (12 - (haw-1))
70 * 64-127: Reserved
71 */
72struct root_entry {
73 u64 val;
74 u64 rsvd1;
75};
76#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
77static inline bool root_present(struct root_entry *root)
78{
79 return (root->val & 1);
80}
81static inline void set_root_present(struct root_entry *root)
82{
83 root->val |= 1;
84}
85static inline void set_root_value(struct root_entry *root, unsigned long value)
86{
87 root->val |= value & VTD_PAGE_MASK;
88}
89
90static inline struct context_entry *
91get_context_addr_from_root(struct root_entry *root)
92{
93 return (struct context_entry *)
94 (root_present(root)?phys_to_virt(
95 root->val & VTD_PAGE_MASK) :
96 NULL);
97}
98
Mark McLoughlin7a8fc252008-11-20 15:49:45 +000099/*
100 * low 64 bits:
101 * 0: present
102 * 1: fault processing disable
103 * 2-3: translation type
104 * 12-63: address space root
105 * high 64 bits:
106 * 0-2: address width
107 * 3-6: aval
108 * 8-23: domain id
109 */
110struct context_entry {
111 u64 lo;
112 u64 hi;
113};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000114
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000115static inline bool context_present(struct context_entry *context)
116{
117 return (context->lo & 1);
118}
119static inline void context_set_present(struct context_entry *context)
120{
121 context->lo |= 1;
122}
123
124static inline void context_set_fault_enable(struct context_entry *context)
125{
126 context->lo &= (((u64)-1) << 2) | 1;
127}
128
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000129#define CONTEXT_TT_MULTI_LEVEL 0
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000130
131static inline void context_set_translation_type(struct context_entry *context,
132 unsigned long value)
133{
134 context->lo &= (((u64)-1) << 4) | 3;
135 context->lo |= (value & 3) << 2;
136}
137
138static inline void context_set_address_root(struct context_entry *context,
139 unsigned long value)
140{
141 context->lo |= value & VTD_PAGE_MASK;
142}
143
144static inline void context_set_address_width(struct context_entry *context,
145 unsigned long value)
146{
147 context->hi |= value & 7;
148}
149
150static inline void context_set_domain_id(struct context_entry *context,
151 unsigned long value)
152{
153 context->hi |= (value & ((1 << 16) - 1)) << 8;
154}
155
156static inline void context_clear_entry(struct context_entry *context)
157{
158 context->lo = 0;
159 context->hi = 0;
160}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000161
Mark McLoughlin622ba122008-11-20 15:49:46 +0000162/*
163 * 0: readable
164 * 1: writable
165 * 2-6: reserved
166 * 7: super page
167 * 8-11: available
168 * 12-63: Host physcial address
169 */
170struct dma_pte {
171 u64 val;
172};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000173
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000174static inline void dma_clear_pte(struct dma_pte *pte)
175{
176 pte->val = 0;
177}
178
179static inline void dma_set_pte_readable(struct dma_pte *pte)
180{
181 pte->val |= DMA_PTE_READ;
182}
183
184static inline void dma_set_pte_writable(struct dma_pte *pte)
185{
186 pte->val |= DMA_PTE_WRITE;
187}
188
189static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
190{
191 pte->val = (pte->val & ~3) | (prot & 3);
192}
193
194static inline u64 dma_pte_addr(struct dma_pte *pte)
195{
196 return (pte->val & VTD_PAGE_MASK);
197}
198
199static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
200{
201 pte->val |= (addr & VTD_PAGE_MASK);
202}
203
204static inline bool dma_pte_present(struct dma_pte *pte)
205{
206 return (pte->val & 3) != 0;
207}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000208
Weidong Han3b5410e2008-12-08 09:17:15 +0800209/* devices under the same p2p bridge are owned in one domain */
Mike Daycdc7b832008-12-12 17:16:30 +0100210#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
Weidong Han3b5410e2008-12-08 09:17:15 +0800211
Weidong Han1ce28fe2008-12-08 16:35:39 +0800212/* domain represents a virtual machine, more than one devices
213 * across iommus may be owned in one domain, e.g. kvm guest.
214 */
215#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
216
Mark McLoughlin99126f72008-11-20 15:49:47 +0000217struct dmar_domain {
218 int id; /* domain id */
Weidong Han8c11e792008-12-08 15:29:22 +0800219 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000220
221 struct list_head devices; /* all devices' list */
222 struct iova_domain iovad; /* iova's that belong to this domain */
223
224 struct dma_pte *pgd; /* virtual address */
225 spinlock_t mapping_lock; /* page table lock */
226 int gaw; /* max guest address width */
227
228 /* adjusted guest address width, 0 is level 2 30-bit */
229 int agaw;
230
Weidong Han3b5410e2008-12-08 09:17:15 +0800231 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800232
233 int iommu_coherency;/* indicate coherency of iommu access */
Weidong Hanc7151a82008-12-08 22:51:37 +0800234 int iommu_count; /* reference count of iommu */
235 spinlock_t iommu_lock; /* protect iommu set in domain */
Weidong Hanfe40f1e2008-12-08 23:10:23 +0800236 u64 max_addr; /* maximum mapped address */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000237};
238
Mark McLoughlina647dac2008-11-20 15:49:48 +0000239/* PCI domain-device relationship */
240struct device_domain_info {
241 struct list_head link; /* link to domain siblings */
242 struct list_head global; /* link to global list */
243 u8 bus; /* PCI bus numer */
244 u8 devfn; /* PCI devfn number */
245 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
246 struct dmar_domain *domain; /* pointer to domain */
247};
248
mark gross5e0d2a62008-03-04 15:22:08 -0800249static void flush_unmaps_timeout(unsigned long data);
250
251DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
252
mark gross80b20dd2008-04-18 13:53:58 -0700253#define HIGH_WATER_MARK 250
254struct deferred_flush_tables {
255 int next;
256 struct iova *iova[HIGH_WATER_MARK];
257 struct dmar_domain *domain[HIGH_WATER_MARK];
258};
259
260static struct deferred_flush_tables *deferred_flush;
261
mark gross5e0d2a62008-03-04 15:22:08 -0800262/* bitmap for indexing intel_iommus */
mark gross5e0d2a62008-03-04 15:22:08 -0800263static int g_num_of_iommus;
264
265static DEFINE_SPINLOCK(async_umap_flush_lock);
266static LIST_HEAD(unmaps_to_do);
267
268static int timer_on;
269static long list_size;
mark gross5e0d2a62008-03-04 15:22:08 -0800270
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700271static void domain_remove_dev_info(struct dmar_domain *domain);
272
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800273#ifdef CONFIG_DMAR_DEFAULT_ON
274int dmar_disabled = 0;
275#else
276int dmar_disabled = 1;
277#endif /*CONFIG_DMAR_DEFAULT_ON*/
278
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700279static int __initdata dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700280static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800281static int intel_iommu_strict;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700282
283#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
284static DEFINE_SPINLOCK(device_domain_lock);
285static LIST_HEAD(device_domain_list);
286
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +0100287static struct iommu_ops intel_iommu_ops;
288
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700289static int __init intel_iommu_setup(char *str)
290{
291 if (!str)
292 return -EINVAL;
293 while (*str) {
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800294 if (!strncmp(str, "on", 2)) {
295 dmar_disabled = 0;
296 printk(KERN_INFO "Intel-IOMMU: enabled\n");
297 } else if (!strncmp(str, "off", 3)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700298 dmar_disabled = 1;
Kyle McMartin0cd5c3c2009-02-04 14:29:19 -0800299 printk(KERN_INFO "Intel-IOMMU: disabled\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700300 } else if (!strncmp(str, "igfx_off", 8)) {
301 dmar_map_gfx = 0;
302 printk(KERN_INFO
303 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700304 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800305 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700306 "Intel-IOMMU: Forcing DAC for PCI devices\n");
307 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800308 } else if (!strncmp(str, "strict", 6)) {
309 printk(KERN_INFO
310 "Intel-IOMMU: disable batched IOTLB flush\n");
311 intel_iommu_strict = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700312 }
313
314 str += strcspn(str, ",");
315 while (*str == ',')
316 str++;
317 }
318 return 0;
319}
320__setup("intel_iommu=", intel_iommu_setup);
321
322static struct kmem_cache *iommu_domain_cache;
323static struct kmem_cache *iommu_devinfo_cache;
324static struct kmem_cache *iommu_iova_cache;
325
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700326static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
327{
328 unsigned int flags;
329 void *vaddr;
330
331 /* trying to avoid low memory issues */
332 flags = current->flags & PF_MEMALLOC;
333 current->flags |= PF_MEMALLOC;
334 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
335 current->flags &= (~PF_MEMALLOC | flags);
336 return vaddr;
337}
338
339
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700340static inline void *alloc_pgtable_page(void)
341{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700342 unsigned int flags;
343 void *vaddr;
344
345 /* trying to avoid low memory issues */
346 flags = current->flags & PF_MEMALLOC;
347 current->flags |= PF_MEMALLOC;
348 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
349 current->flags &= (~PF_MEMALLOC | flags);
350 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700351}
352
353static inline void free_pgtable_page(void *vaddr)
354{
355 free_page((unsigned long)vaddr);
356}
357
358static inline void *alloc_domain_mem(void)
359{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700360 return iommu_kmem_cache_alloc(iommu_domain_cache);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700361}
362
Kay, Allen M38717942008-09-09 18:37:29 +0300363static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700364{
365 kmem_cache_free(iommu_domain_cache, vaddr);
366}
367
368static inline void * alloc_devinfo_mem(void)
369{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700370 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700371}
372
373static inline void free_devinfo_mem(void *vaddr)
374{
375 kmem_cache_free(iommu_devinfo_cache, vaddr);
376}
377
378struct iova *alloc_iova_mem(void)
379{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700380 return iommu_kmem_cache_alloc(iommu_iova_cache);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700381}
382
383void free_iova_mem(struct iova *iova)
384{
385 kmem_cache_free(iommu_iova_cache, iova);
386}
387
Weidong Han1b573682008-12-08 15:34:06 +0800388
389static inline int width_to_agaw(int width);
390
391/* calculate agaw for each iommu.
392 * "SAGAW" may be different across iommus, use a default agaw, and
393 * get a supported less agaw for iommus that don't support the default agaw.
394 */
395int iommu_calculate_agaw(struct intel_iommu *iommu)
396{
397 unsigned long sagaw;
398 int agaw = -1;
399
400 sagaw = cap_sagaw(iommu->cap);
401 for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
402 agaw >= 0; agaw--) {
403 if (test_bit(agaw, &sagaw))
404 break;
405 }
406
407 return agaw;
408}
409
Weidong Han8c11e792008-12-08 15:29:22 +0800410/* in native case, each domain is related to only one iommu */
411static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
412{
413 int iommu_id;
414
Weidong Han1ce28fe2008-12-08 16:35:39 +0800415 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
416
Weidong Han8c11e792008-12-08 15:29:22 +0800417 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
418 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
419 return NULL;
420
421 return g_iommus[iommu_id];
422}
423
Weidong Han8e6040972008-12-08 15:49:06 +0800424/* "Coherency" capability may be different across iommus */
425static void domain_update_iommu_coherency(struct dmar_domain *domain)
426{
427 int i;
428
429 domain->iommu_coherency = 1;
430
431 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
432 for (; i < g_num_of_iommus; ) {
433 if (!ecap_coherent(g_iommus[i]->ecap)) {
434 domain->iommu_coherency = 0;
435 break;
436 }
437 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
438 }
439}
440
Weidong Hanc7151a82008-12-08 22:51:37 +0800441static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
442{
443 struct dmar_drhd_unit *drhd = NULL;
444 int i;
445
446 for_each_drhd_unit(drhd) {
447 if (drhd->ignored)
448 continue;
449
450 for (i = 0; i < drhd->devices_cnt; i++)
Dirk Hohndel288e4872009-01-11 15:33:51 +0000451 if (drhd->devices[i] &&
452 drhd->devices[i]->bus->number == bus &&
Weidong Hanc7151a82008-12-08 22:51:37 +0800453 drhd->devices[i]->devfn == devfn)
454 return drhd->iommu;
455
456 if (drhd->include_all)
457 return drhd->iommu;
458 }
459
460 return NULL;
461}
462
Weidong Han5331fe62008-12-08 23:00:00 +0800463static void domain_flush_cache(struct dmar_domain *domain,
464 void *addr, int size)
465{
466 if (!domain->iommu_coherency)
467 clflush_cache_range(addr, size);
468}
469
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700470/* Gets context entry for a given bus and devfn */
471static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
472 u8 bus, u8 devfn)
473{
474 struct root_entry *root;
475 struct context_entry *context;
476 unsigned long phy_addr;
477 unsigned long flags;
478
479 spin_lock_irqsave(&iommu->lock, flags);
480 root = &iommu->root_entry[bus];
481 context = get_context_addr_from_root(root);
482 if (!context) {
483 context = (struct context_entry *)alloc_pgtable_page();
484 if (!context) {
485 spin_unlock_irqrestore(&iommu->lock, flags);
486 return NULL;
487 }
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700488 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700489 phy_addr = virt_to_phys((void *)context);
490 set_root_value(root, phy_addr);
491 set_root_present(root);
492 __iommu_flush_cache(iommu, root, sizeof(*root));
493 }
494 spin_unlock_irqrestore(&iommu->lock, flags);
495 return &context[devfn];
496}
497
498static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
499{
500 struct root_entry *root;
501 struct context_entry *context;
502 int ret;
503 unsigned long flags;
504
505 spin_lock_irqsave(&iommu->lock, flags);
506 root = &iommu->root_entry[bus];
507 context = get_context_addr_from_root(root);
508 if (!context) {
509 ret = 0;
510 goto out;
511 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000512 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700513out:
514 spin_unlock_irqrestore(&iommu->lock, flags);
515 return ret;
516}
517
518static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
519{
520 struct root_entry *root;
521 struct context_entry *context;
522 unsigned long flags;
523
524 spin_lock_irqsave(&iommu->lock, flags);
525 root = &iommu->root_entry[bus];
526 context = get_context_addr_from_root(root);
527 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000528 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700529 __iommu_flush_cache(iommu, &context[devfn], \
530 sizeof(*context));
531 }
532 spin_unlock_irqrestore(&iommu->lock, flags);
533}
534
535static void free_context_table(struct intel_iommu *iommu)
536{
537 struct root_entry *root;
538 int i;
539 unsigned long flags;
540 struct context_entry *context;
541
542 spin_lock_irqsave(&iommu->lock, flags);
543 if (!iommu->root_entry) {
544 goto out;
545 }
546 for (i = 0; i < ROOT_ENTRY_NR; i++) {
547 root = &iommu->root_entry[i];
548 context = get_context_addr_from_root(root);
549 if (context)
550 free_pgtable_page(context);
551 }
552 free_pgtable_page(iommu->root_entry);
553 iommu->root_entry = NULL;
554out:
555 spin_unlock_irqrestore(&iommu->lock, flags);
556}
557
558/* page table handling */
559#define LEVEL_STRIDE (9)
560#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
561
562static inline int agaw_to_level(int agaw)
563{
564 return agaw + 2;
565}
566
567static inline int agaw_to_width(int agaw)
568{
569 return 30 + agaw * LEVEL_STRIDE;
570
571}
572
573static inline int width_to_agaw(int width)
574{
575 return (width - 30) / LEVEL_STRIDE;
576}
577
578static inline unsigned int level_to_offset_bits(int level)
579{
580 return (12 + (level - 1) * LEVEL_STRIDE);
581}
582
583static inline int address_level_offset(u64 addr, int level)
584{
585 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
586}
587
588static inline u64 level_mask(int level)
589{
590 return ((u64)-1 << level_to_offset_bits(level));
591}
592
593static inline u64 level_size(int level)
594{
595 return ((u64)1 << level_to_offset_bits(level));
596}
597
598static inline u64 align_to_level(u64 addr, int level)
599{
600 return ((addr + level_size(level) - 1) & level_mask(level));
601}
602
603static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
604{
605 int addr_width = agaw_to_width(domain->agaw);
606 struct dma_pte *parent, *pte = NULL;
607 int level = agaw_to_level(domain->agaw);
608 int offset;
609 unsigned long flags;
610
611 BUG_ON(!domain->pgd);
612
613 addr &= (((u64)1) << addr_width) - 1;
614 parent = domain->pgd;
615
616 spin_lock_irqsave(&domain->mapping_lock, flags);
617 while (level > 0) {
618 void *tmp_page;
619
620 offset = address_level_offset(addr, level);
621 pte = &parent[offset];
622 if (level == 1)
623 break;
624
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000625 if (!dma_pte_present(pte)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700626 tmp_page = alloc_pgtable_page();
627
628 if (!tmp_page) {
629 spin_unlock_irqrestore(&domain->mapping_lock,
630 flags);
631 return NULL;
632 }
Weidong Han5331fe62008-12-08 23:00:00 +0800633 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000634 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700635 /*
636 * high level table always sets r/w, last level page
637 * table control read/write
638 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000639 dma_set_pte_readable(pte);
640 dma_set_pte_writable(pte);
Weidong Han5331fe62008-12-08 23:00:00 +0800641 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700642 }
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000643 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700644 level--;
645 }
646
647 spin_unlock_irqrestore(&domain->mapping_lock, flags);
648 return pte;
649}
650
651/* return address's pte at specific level */
652static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
653 int level)
654{
655 struct dma_pte *parent, *pte = NULL;
656 int total = agaw_to_level(domain->agaw);
657 int offset;
658
659 parent = domain->pgd;
660 while (level <= total) {
661 offset = address_level_offset(addr, total);
662 pte = &parent[offset];
663 if (level == total)
664 return pte;
665
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000666 if (!dma_pte_present(pte))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700667 break;
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000668 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700669 total--;
670 }
671 return NULL;
672}
673
674/* clear one page's page table */
675static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
676{
677 struct dma_pte *pte = NULL;
678
679 /* get last level pte */
680 pte = dma_addr_level_pte(domain, addr, 1);
681
682 if (pte) {
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000683 dma_clear_pte(pte);
Weidong Han5331fe62008-12-08 23:00:00 +0800684 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700685 }
686}
687
688/* clear last level pte, a tlb flush should be followed */
689static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
690{
691 int addr_width = agaw_to_width(domain->agaw);
692
693 start &= (((u64)1) << addr_width) - 1;
694 end &= (((u64)1) << addr_width) - 1;
695 /* in case it's partial page */
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700696 start = PAGE_ALIGN(start);
697 end &= PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700698
699 /* we don't need lock here, nobody else touches the iova range */
700 while (start < end) {
701 dma_pte_clear_one(domain, start);
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700702 start += VTD_PAGE_SIZE;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700703 }
704}
705
706/* free page table pages. last level pte should already be cleared */
707static void dma_pte_free_pagetable(struct dmar_domain *domain,
708 u64 start, u64 end)
709{
710 int addr_width = agaw_to_width(domain->agaw);
711 struct dma_pte *pte;
712 int total = agaw_to_level(domain->agaw);
713 int level;
714 u64 tmp;
715
716 start &= (((u64)1) << addr_width) - 1;
717 end &= (((u64)1) << addr_width) - 1;
718
719 /* we don't need lock here, nobody else touches the iova range */
720 level = 2;
721 while (level <= total) {
722 tmp = align_to_level(start, level);
723 if (tmp >= end || (tmp + level_size(level) > end))
724 return;
725
726 while (tmp < end) {
727 pte = dma_addr_level_pte(domain, tmp, level);
728 if (pte) {
729 free_pgtable_page(
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000730 phys_to_virt(dma_pte_addr(pte)));
731 dma_clear_pte(pte);
Weidong Han5331fe62008-12-08 23:00:00 +0800732 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700733 }
734 tmp += level_size(level);
735 }
736 level++;
737 }
738 /* free pgd */
739 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
740 free_pgtable_page(domain->pgd);
741 domain->pgd = NULL;
742 }
743}
744
745/* iommu handling */
746static int iommu_alloc_root_entry(struct intel_iommu *iommu)
747{
748 struct root_entry *root;
749 unsigned long flags;
750
751 root = (struct root_entry *)alloc_pgtable_page();
752 if (!root)
753 return -ENOMEM;
754
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700755 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700756
757 spin_lock_irqsave(&iommu->lock, flags);
758 iommu->root_entry = root;
759 spin_unlock_irqrestore(&iommu->lock, flags);
760
761 return 0;
762}
763
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700764static void iommu_set_root_entry(struct intel_iommu *iommu)
765{
766 void *addr;
767 u32 cmd, sts;
768 unsigned long flag;
769
770 addr = iommu->root_entry;
771
772 spin_lock_irqsave(&iommu->register_lock, flag);
773 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
774
775 cmd = iommu->gcmd | DMA_GCMD_SRTP;
776 writel(cmd, iommu->reg + DMAR_GCMD_REG);
777
778 /* Make sure hardware complete it */
779 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
780 readl, (sts & DMA_GSTS_RTPS), sts);
781
782 spin_unlock_irqrestore(&iommu->register_lock, flag);
783}
784
785static void iommu_flush_write_buffer(struct intel_iommu *iommu)
786{
787 u32 val;
788 unsigned long flag;
789
David Woodhouse9af88142009-02-13 23:18:03 +0000790 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700791 return;
792 val = iommu->gcmd | DMA_GCMD_WBF;
793
794 spin_lock_irqsave(&iommu->register_lock, flag);
795 writel(val, iommu->reg + DMAR_GCMD_REG);
796
797 /* Make sure hardware complete it */
798 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
799 readl, (!(val & DMA_GSTS_WBFS)), val);
800
801 spin_unlock_irqrestore(&iommu->register_lock, flag);
802}
803
804/* return value determine if we need a write buffer flush */
805static int __iommu_flush_context(struct intel_iommu *iommu,
806 u16 did, u16 source_id, u8 function_mask, u64 type,
807 int non_present_entry_flush)
808{
809 u64 val = 0;
810 unsigned long flag;
811
812 /*
813 * In the non-present entry flush case, if hardware doesn't cache
814 * non-present entry we do nothing and if hardware cache non-present
815 * entry, we flush entries of domain 0 (the domain id is used to cache
816 * any non-present entries)
817 */
818 if (non_present_entry_flush) {
819 if (!cap_caching_mode(iommu->cap))
820 return 1;
821 else
822 did = 0;
823 }
824
825 switch (type) {
826 case DMA_CCMD_GLOBAL_INVL:
827 val = DMA_CCMD_GLOBAL_INVL;
828 break;
829 case DMA_CCMD_DOMAIN_INVL:
830 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
831 break;
832 case DMA_CCMD_DEVICE_INVL:
833 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
834 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
835 break;
836 default:
837 BUG();
838 }
839 val |= DMA_CCMD_ICC;
840
841 spin_lock_irqsave(&iommu->register_lock, flag);
842 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
843
844 /* Make sure hardware complete it */
845 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
846 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
847
848 spin_unlock_irqrestore(&iommu->register_lock, flag);
849
Ameya Palande4d235ba2008-10-18 20:27:30 -0700850 /* flush context entry will implicitly flush write buffer */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700851 return 0;
852}
853
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700854/* return value determine if we need a write buffer flush */
855static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
856 u64 addr, unsigned int size_order, u64 type,
857 int non_present_entry_flush)
858{
859 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
860 u64 val = 0, val_iva = 0;
861 unsigned long flag;
862
863 /*
864 * In the non-present entry flush case, if hardware doesn't cache
865 * non-present entry we do nothing and if hardware cache non-present
866 * entry, we flush entries of domain 0 (the domain id is used to cache
867 * any non-present entries)
868 */
869 if (non_present_entry_flush) {
870 if (!cap_caching_mode(iommu->cap))
871 return 1;
872 else
873 did = 0;
874 }
875
876 switch (type) {
877 case DMA_TLB_GLOBAL_FLUSH:
878 /* global flush doesn't need set IVA_REG */
879 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
880 break;
881 case DMA_TLB_DSI_FLUSH:
882 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
883 break;
884 case DMA_TLB_PSI_FLUSH:
885 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
886 /* Note: always flush non-leaf currently */
887 val_iva = size_order | addr;
888 break;
889 default:
890 BUG();
891 }
892 /* Note: set drain read/write */
893#if 0
894 /*
895 * This is probably to be super secure.. Looks like we can
896 * ignore it without any impact.
897 */
898 if (cap_read_drain(iommu->cap))
899 val |= DMA_TLB_READ_DRAIN;
900#endif
901 if (cap_write_drain(iommu->cap))
902 val |= DMA_TLB_WRITE_DRAIN;
903
904 spin_lock_irqsave(&iommu->register_lock, flag);
905 /* Note: Only uses first TLB reg currently */
906 if (val_iva)
907 dmar_writeq(iommu->reg + tlb_offset, val_iva);
908 dmar_writeq(iommu->reg + tlb_offset + 8, val);
909
910 /* Make sure hardware complete it */
911 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
912 dmar_readq, (!(val & DMA_TLB_IVT)), val);
913
914 spin_unlock_irqrestore(&iommu->register_lock, flag);
915
916 /* check IOTLB invalidation granularity */
917 if (DMA_TLB_IAIG(val) == 0)
918 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
919 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
920 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700921 (unsigned long long)DMA_TLB_IIRG(type),
922 (unsigned long long)DMA_TLB_IAIG(val));
Ameya Palande4d235ba2008-10-18 20:27:30 -0700923 /* flush iotlb entry will implicitly flush write buffer */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700924 return 0;
925}
926
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700927static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
928 u64 addr, unsigned int pages, int non_present_entry_flush)
929{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -0700930 unsigned int mask;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700931
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700932 BUG_ON(addr & (~VTD_PAGE_MASK));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700933 BUG_ON(pages == 0);
934
935 /* Fallback to domain selective flush if no PSI support */
936 if (!cap_pgsel_inv(iommu->cap))
Youquan Songa77b67d2008-10-16 16:31:56 -0700937 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
938 DMA_TLB_DSI_FLUSH,
939 non_present_entry_flush);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700940
941 /*
942 * PSI requires page size to be 2 ^ x, and the base address is naturally
943 * aligned to the size
944 */
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -0700945 mask = ilog2(__roundup_pow_of_two(pages));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700946 /* Fallback to domain selective flush if size is too big */
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -0700947 if (mask > cap_max_amask_val(iommu->cap))
Youquan Songa77b67d2008-10-16 16:31:56 -0700948 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
949 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700950
Youquan Songa77b67d2008-10-16 16:31:56 -0700951 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
952 DMA_TLB_PSI_FLUSH,
953 non_present_entry_flush);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700954}
955
mark grossf8bab732008-02-08 04:18:38 -0800956static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
957{
958 u32 pmen;
959 unsigned long flags;
960
961 spin_lock_irqsave(&iommu->register_lock, flags);
962 pmen = readl(iommu->reg + DMAR_PMEN_REG);
963 pmen &= ~DMA_PMEN_EPM;
964 writel(pmen, iommu->reg + DMAR_PMEN_REG);
965
966 /* wait for the protected region status bit to clear */
967 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
968 readl, !(pmen & DMA_PMEN_PRS), pmen);
969
970 spin_unlock_irqrestore(&iommu->register_lock, flags);
971}
972
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700973static int iommu_enable_translation(struct intel_iommu *iommu)
974{
975 u32 sts;
976 unsigned long flags;
977
978 spin_lock_irqsave(&iommu->register_lock, flags);
979 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
980
981 /* Make sure hardware complete it */
982 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
983 readl, (sts & DMA_GSTS_TES), sts);
984
985 iommu->gcmd |= DMA_GCMD_TE;
986 spin_unlock_irqrestore(&iommu->register_lock, flags);
987 return 0;
988}
989
990static int iommu_disable_translation(struct intel_iommu *iommu)
991{
992 u32 sts;
993 unsigned long flag;
994
995 spin_lock_irqsave(&iommu->register_lock, flag);
996 iommu->gcmd &= ~DMA_GCMD_TE;
997 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
998
999 /* Make sure hardware complete it */
1000 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1001 readl, (!(sts & DMA_GSTS_TES)), sts);
1002
1003 spin_unlock_irqrestore(&iommu->register_lock, flag);
1004 return 0;
1005}
1006
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001007
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001008static int iommu_init_domains(struct intel_iommu *iommu)
1009{
1010 unsigned long ndomains;
1011 unsigned long nlongs;
1012
1013 ndomains = cap_ndoms(iommu->cap);
1014 pr_debug("Number of Domains supportd <%ld>\n", ndomains);
1015 nlongs = BITS_TO_LONGS(ndomains);
1016
1017 /* TBD: there might be 64K domains,
1018 * consider other allocation for future chip
1019 */
1020 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1021 if (!iommu->domain_ids) {
1022 printk(KERN_ERR "Allocating domain id array failed\n");
1023 return -ENOMEM;
1024 }
1025 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1026 GFP_KERNEL);
1027 if (!iommu->domains) {
1028 printk(KERN_ERR "Allocating domain array failed\n");
1029 kfree(iommu->domain_ids);
1030 return -ENOMEM;
1031 }
1032
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001033 spin_lock_init(&iommu->lock);
1034
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001035 /*
1036 * if Caching mode is set, then invalid translations are tagged
1037 * with domainid 0. Hence we need to pre-allocate it.
1038 */
1039 if (cap_caching_mode(iommu->cap))
1040 set_bit(0, iommu->domain_ids);
1041 return 0;
1042}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001043
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001044
1045static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001046static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001047
1048void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001049{
1050 struct dmar_domain *domain;
1051 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001052 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001053
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001054 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1055 for (; i < cap_ndoms(iommu->cap); ) {
1056 domain = iommu->domains[i];
1057 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001058
1059 spin_lock_irqsave(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001060 if (--domain->iommu_count == 0) {
1061 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1062 vm_domain_exit(domain);
1063 else
1064 domain_exit(domain);
1065 }
Weidong Hanc7151a82008-12-08 22:51:37 +08001066 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1067
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001068 i = find_next_bit(iommu->domain_ids,
1069 cap_ndoms(iommu->cap), i+1);
1070 }
1071
1072 if (iommu->gcmd & DMA_GCMD_TE)
1073 iommu_disable_translation(iommu);
1074
1075 if (iommu->irq) {
1076 set_irq_data(iommu->irq, NULL);
1077 /* This will mask the irq */
1078 free_irq(iommu->irq, iommu);
1079 destroy_irq(iommu->irq);
1080 }
1081
1082 kfree(iommu->domains);
1083 kfree(iommu->domain_ids);
1084
Weidong Hand9630fe2008-12-08 11:06:32 +08001085 g_iommus[iommu->seq_id] = NULL;
1086
1087 /* if all iommus are freed, free g_iommus */
1088 for (i = 0; i < g_num_of_iommus; i++) {
1089 if (g_iommus[i])
1090 break;
1091 }
1092
1093 if (i == g_num_of_iommus)
1094 kfree(g_iommus);
1095
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001096 /* free context mapping */
1097 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001098}
1099
1100static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
1101{
1102 unsigned long num;
1103 unsigned long ndomains;
1104 struct dmar_domain *domain;
1105 unsigned long flags;
1106
1107 domain = alloc_domain_mem();
1108 if (!domain)
1109 return NULL;
1110
1111 ndomains = cap_ndoms(iommu->cap);
1112
1113 spin_lock_irqsave(&iommu->lock, flags);
1114 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1115 if (num >= ndomains) {
1116 spin_unlock_irqrestore(&iommu->lock, flags);
1117 free_domain_mem(domain);
1118 printk(KERN_ERR "IOMMU: no free domain ids\n");
1119 return NULL;
1120 }
1121
1122 set_bit(num, iommu->domain_ids);
1123 domain->id = num;
Weidong Han8c11e792008-12-08 15:29:22 +08001124 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1125 set_bit(iommu->seq_id, &domain->iommu_bmp);
Weidong Hand71a2f32008-12-07 21:13:41 +08001126 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001127 iommu->domains[num] = domain;
1128 spin_unlock_irqrestore(&iommu->lock, flags);
1129
1130 return domain;
1131}
1132
1133static void iommu_free_domain(struct dmar_domain *domain)
1134{
1135 unsigned long flags;
Weidong Han8c11e792008-12-08 15:29:22 +08001136 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001137
Weidong Han8c11e792008-12-08 15:29:22 +08001138 iommu = domain_get_iommu(domain);
1139
1140 spin_lock_irqsave(&iommu->lock, flags);
1141 clear_bit(domain->id, iommu->domain_ids);
1142 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001143}
1144
1145static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001146static struct lock_class_key reserved_alloc_key;
1147static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001148
1149static void dmar_init_reserved_ranges(void)
1150{
1151 struct pci_dev *pdev = NULL;
1152 struct iova *iova;
1153 int i;
1154 u64 addr, size;
1155
David Millerf6611972008-02-06 01:36:23 -08001156 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001157
Mark Gross8a443df2008-03-04 14:59:31 -08001158 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1159 &reserved_alloc_key);
1160 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1161 &reserved_rbtree_key);
1162
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001163 /* IOAPIC ranges shouldn't be accessed by DMA */
1164 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1165 IOVA_PFN(IOAPIC_RANGE_END));
1166 if (!iova)
1167 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1168
1169 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1170 for_each_pci_dev(pdev) {
1171 struct resource *r;
1172
1173 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1174 r = &pdev->resource[i];
1175 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1176 continue;
1177 addr = r->start;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001178 addr &= PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001179 size = r->end - addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001180 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001181 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1182 IOVA_PFN(size + addr) - 1);
1183 if (!iova)
1184 printk(KERN_ERR "Reserve iova failed\n");
1185 }
1186 }
1187
1188}
1189
1190static void domain_reserve_special_ranges(struct dmar_domain *domain)
1191{
1192 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1193}
1194
1195static inline int guestwidth_to_adjustwidth(int gaw)
1196{
1197 int agaw;
1198 int r = (gaw - 12) % 9;
1199
1200 if (r == 0)
1201 agaw = gaw;
1202 else
1203 agaw = gaw + 9 - r;
1204 if (agaw > 64)
1205 agaw = 64;
1206 return agaw;
1207}
1208
1209static int domain_init(struct dmar_domain *domain, int guest_width)
1210{
1211 struct intel_iommu *iommu;
1212 int adjust_width, agaw;
1213 unsigned long sagaw;
1214
David Millerf6611972008-02-06 01:36:23 -08001215 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001216 spin_lock_init(&domain->mapping_lock);
Weidong Hanc7151a82008-12-08 22:51:37 +08001217 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001218
1219 domain_reserve_special_ranges(domain);
1220
1221 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001222 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001223 if (guest_width > cap_mgaw(iommu->cap))
1224 guest_width = cap_mgaw(iommu->cap);
1225 domain->gaw = guest_width;
1226 adjust_width = guestwidth_to_adjustwidth(guest_width);
1227 agaw = width_to_agaw(adjust_width);
1228 sagaw = cap_sagaw(iommu->cap);
1229 if (!test_bit(agaw, &sagaw)) {
1230 /* hardware doesn't support it, choose a bigger one */
1231 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1232 agaw = find_next_bit(&sagaw, 5, agaw);
1233 if (agaw >= 5)
1234 return -ENODEV;
1235 }
1236 domain->agaw = agaw;
1237 INIT_LIST_HEAD(&domain->devices);
1238
Weidong Han8e6040972008-12-08 15:49:06 +08001239 if (ecap_coherent(iommu->ecap))
1240 domain->iommu_coherency = 1;
1241 else
1242 domain->iommu_coherency = 0;
1243
Weidong Hanc7151a82008-12-08 22:51:37 +08001244 domain->iommu_count = 1;
1245
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001246 /* always allocate the top pgd */
1247 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1248 if (!domain->pgd)
1249 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001250 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001251 return 0;
1252}
1253
1254static void domain_exit(struct dmar_domain *domain)
1255{
1256 u64 end;
1257
1258 /* Domain 0 is reserved, so dont process it */
1259 if (!domain)
1260 return;
1261
1262 domain_remove_dev_info(domain);
1263 /* destroy iovas */
1264 put_iova_domain(&domain->iovad);
1265 end = DOMAIN_MAX_ADDR(domain->gaw);
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001266 end = end & (~PAGE_MASK);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001267
1268 /* clear ptes */
1269 dma_pte_clear_range(domain, 0, end);
1270
1271 /* free page tables */
1272 dma_pte_free_pagetable(domain, 0, end);
1273
1274 iommu_free_domain(domain);
1275 free_domain_mem(domain);
1276}
1277
1278static int domain_context_mapping_one(struct dmar_domain *domain,
1279 u8 bus, u8 devfn)
1280{
1281 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001282 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001283 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001284 struct dma_pte *pgd;
1285 unsigned long num;
1286 unsigned long ndomains;
1287 int id;
1288 int agaw;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001289
1290 pr_debug("Set context mapping for %02x:%02x.%d\n",
1291 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1292 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08001293
1294 iommu = device_to_iommu(bus, devfn);
1295 if (!iommu)
1296 return -ENODEV;
1297
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001298 context = device_to_context_entry(iommu, bus, devfn);
1299 if (!context)
1300 return -ENOMEM;
1301 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001302 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001303 spin_unlock_irqrestore(&iommu->lock, flags);
1304 return 0;
1305 }
1306
Weidong Hanea6606b2008-12-08 23:08:15 +08001307 id = domain->id;
1308 pgd = domain->pgd;
1309
1310 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
1311 int found = 0;
1312
1313 /* find an available domain id for this device in iommu */
1314 ndomains = cap_ndoms(iommu->cap);
1315 num = find_first_bit(iommu->domain_ids, ndomains);
1316 for (; num < ndomains; ) {
1317 if (iommu->domains[num] == domain) {
1318 id = num;
1319 found = 1;
1320 break;
1321 }
1322 num = find_next_bit(iommu->domain_ids,
1323 cap_ndoms(iommu->cap), num+1);
1324 }
1325
1326 if (found == 0) {
1327 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1328 if (num >= ndomains) {
1329 spin_unlock_irqrestore(&iommu->lock, flags);
1330 printk(KERN_ERR "IOMMU: no free domain ids\n");
1331 return -EFAULT;
1332 }
1333
1334 set_bit(num, iommu->domain_ids);
1335 iommu->domains[num] = domain;
1336 id = num;
1337 }
1338
1339 /* Skip top levels of page tables for
1340 * iommu which has less agaw than default.
1341 */
1342 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1343 pgd = phys_to_virt(dma_pte_addr(pgd));
1344 if (!dma_pte_present(pgd)) {
1345 spin_unlock_irqrestore(&iommu->lock, flags);
1346 return -ENOMEM;
1347 }
1348 }
1349 }
1350
1351 context_set_domain_id(context, id);
1352 context_set_address_width(context, iommu->agaw);
1353 context_set_address_root(context, virt_to_phys(pgd));
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001354 context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
1355 context_set_fault_enable(context);
1356 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001357 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001358
1359 /* it's a non-present to present mapping */
Youquan Songa77b67d2008-10-16 16:31:56 -07001360 if (iommu->flush.flush_context(iommu, domain->id,
1361 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1362 DMA_CCMD_DEVICE_INVL, 1))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001363 iommu_flush_write_buffer(iommu);
1364 else
Youquan Songa77b67d2008-10-16 16:31:56 -07001365 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1366
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001367 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001368
1369 spin_lock_irqsave(&domain->iommu_lock, flags);
1370 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1371 domain->iommu_count++;
1372 domain_update_iommu_coherency(domain);
1373 }
1374 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001375 return 0;
1376}
1377
1378static int
1379domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1380{
1381 int ret;
1382 struct pci_dev *tmp, *parent;
1383
1384 ret = domain_context_mapping_one(domain, pdev->bus->number,
1385 pdev->devfn);
1386 if (ret)
1387 return ret;
1388
1389 /* dependent device mapping */
1390 tmp = pci_find_upstream_pcie_bridge(pdev);
1391 if (!tmp)
1392 return 0;
1393 /* Secondary interface's bus number and devfn 0 */
1394 parent = pdev->bus->self;
1395 while (parent != tmp) {
1396 ret = domain_context_mapping_one(domain, parent->bus->number,
1397 parent->devfn);
1398 if (ret)
1399 return ret;
1400 parent = parent->bus->self;
1401 }
1402 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1403 return domain_context_mapping_one(domain,
1404 tmp->subordinate->number, 0);
1405 else /* this is a legacy PCI bridge */
1406 return domain_context_mapping_one(domain,
1407 tmp->bus->number, tmp->devfn);
1408}
1409
Weidong Han5331fe62008-12-08 23:00:00 +08001410static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411{
1412 int ret;
1413 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001414 struct intel_iommu *iommu;
1415
1416 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
1417 if (!iommu)
1418 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001419
Weidong Han8c11e792008-12-08 15:29:22 +08001420 ret = device_context_mapped(iommu,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001421 pdev->bus->number, pdev->devfn);
1422 if (!ret)
1423 return ret;
1424 /* dependent device mapping */
1425 tmp = pci_find_upstream_pcie_bridge(pdev);
1426 if (!tmp)
1427 return ret;
1428 /* Secondary interface's bus number and devfn 0 */
1429 parent = pdev->bus->self;
1430 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001431 ret = device_context_mapped(iommu, parent->bus->number,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001432 parent->devfn);
1433 if (!ret)
1434 return ret;
1435 parent = parent->bus->self;
1436 }
1437 if (tmp->is_pcie)
Weidong Han8c11e792008-12-08 15:29:22 +08001438 return device_context_mapped(iommu,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001439 tmp->subordinate->number, 0);
1440 else
Weidong Han8c11e792008-12-08 15:29:22 +08001441 return device_context_mapped(iommu,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442 tmp->bus->number, tmp->devfn);
1443}
1444
1445static int
1446domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1447 u64 hpa, size_t size, int prot)
1448{
1449 u64 start_pfn, end_pfn;
1450 struct dma_pte *pte;
1451 int index;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001452 int addr_width = agaw_to_width(domain->agaw);
1453
1454 hpa &= (((u64)1) << addr_width) - 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001455
1456 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1457 return -EINVAL;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001458 iova &= PAGE_MASK;
1459 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1460 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461 index = 0;
1462 while (start_pfn < end_pfn) {
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001463 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001464 if (!pte)
1465 return -ENOMEM;
1466 /* We don't need lock here, nobody else
1467 * touches the iova range
1468 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001469 BUG_ON(dma_pte_addr(pte));
1470 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1471 dma_set_pte_prot(pte, prot);
Weidong Han5331fe62008-12-08 23:00:00 +08001472 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001473 start_pfn++;
1474 index++;
1475 }
1476 return 0;
1477}
1478
Weidong Hanc7151a82008-12-08 22:51:37 +08001479static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001480{
Weidong Hanc7151a82008-12-08 22:51:37 +08001481 if (!iommu)
1482 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001483
1484 clear_context_table(iommu, bus, devfn);
1485 iommu->flush.flush_context(iommu, 0, 0, 0,
Youquan Songa77b67d2008-10-16 16:31:56 -07001486 DMA_CCMD_GLOBAL_INVL, 0);
Weidong Han8c11e792008-12-08 15:29:22 +08001487 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Youquan Songa77b67d2008-10-16 16:31:56 -07001488 DMA_TLB_GLOBAL_FLUSH, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001489}
1490
1491static void domain_remove_dev_info(struct dmar_domain *domain)
1492{
1493 struct device_domain_info *info;
1494 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001495 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001496
1497 spin_lock_irqsave(&device_domain_lock, flags);
1498 while (!list_empty(&domain->devices)) {
1499 info = list_entry(domain->devices.next,
1500 struct device_domain_info, link);
1501 list_del(&info->link);
1502 list_del(&info->global);
1503 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001504 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001505 spin_unlock_irqrestore(&device_domain_lock, flags);
1506
Weidong Hanc7151a82008-12-08 22:51:37 +08001507 iommu = device_to_iommu(info->bus, info->devfn);
1508 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001509 free_devinfo_mem(info);
1510
1511 spin_lock_irqsave(&device_domain_lock, flags);
1512 }
1513 spin_unlock_irqrestore(&device_domain_lock, flags);
1514}
1515
1516/*
1517 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001518 * Note: we use struct pci_dev->dev.archdata.iommu stores the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001519 */
Kay, Allen M38717942008-09-09 18:37:29 +03001520static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001521find_domain(struct pci_dev *pdev)
1522{
1523 struct device_domain_info *info;
1524
1525 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001526 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001527 if (info)
1528 return info->domain;
1529 return NULL;
1530}
1531
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001532/* domain is initialized */
1533static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1534{
1535 struct dmar_domain *domain, *found = NULL;
1536 struct intel_iommu *iommu;
1537 struct dmar_drhd_unit *drhd;
1538 struct device_domain_info *info, *tmp;
1539 struct pci_dev *dev_tmp;
1540 unsigned long flags;
1541 int bus = 0, devfn = 0;
1542
1543 domain = find_domain(pdev);
1544 if (domain)
1545 return domain;
1546
1547 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1548 if (dev_tmp) {
1549 if (dev_tmp->is_pcie) {
1550 bus = dev_tmp->subordinate->number;
1551 devfn = 0;
1552 } else {
1553 bus = dev_tmp->bus->number;
1554 devfn = dev_tmp->devfn;
1555 }
1556 spin_lock_irqsave(&device_domain_lock, flags);
1557 list_for_each_entry(info, &device_domain_list, global) {
1558 if (info->bus == bus && info->devfn == devfn) {
1559 found = info->domain;
1560 break;
1561 }
1562 }
1563 spin_unlock_irqrestore(&device_domain_lock, flags);
1564 /* pcie-pci bridge already has a domain, uses it */
1565 if (found) {
1566 domain = found;
1567 goto found_domain;
1568 }
1569 }
1570
1571 /* Allocate new domain for the device */
1572 drhd = dmar_find_matched_drhd_unit(pdev);
1573 if (!drhd) {
1574 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1575 pci_name(pdev));
1576 return NULL;
1577 }
1578 iommu = drhd->iommu;
1579
1580 domain = iommu_alloc_domain(iommu);
1581 if (!domain)
1582 goto error;
1583
1584 if (domain_init(domain, gaw)) {
1585 domain_exit(domain);
1586 goto error;
1587 }
1588
1589 /* register pcie-to-pci device */
1590 if (dev_tmp) {
1591 info = alloc_devinfo_mem();
1592 if (!info) {
1593 domain_exit(domain);
1594 goto error;
1595 }
1596 info->bus = bus;
1597 info->devfn = devfn;
1598 info->dev = NULL;
1599 info->domain = domain;
1600 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001601 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001602
1603 /* pcie-to-pci bridge already has a domain, uses it */
1604 found = NULL;
1605 spin_lock_irqsave(&device_domain_lock, flags);
1606 list_for_each_entry(tmp, &device_domain_list, global) {
1607 if (tmp->bus == bus && tmp->devfn == devfn) {
1608 found = tmp->domain;
1609 break;
1610 }
1611 }
1612 if (found) {
1613 free_devinfo_mem(info);
1614 domain_exit(domain);
1615 domain = found;
1616 } else {
1617 list_add(&info->link, &domain->devices);
1618 list_add(&info->global, &device_domain_list);
1619 }
1620 spin_unlock_irqrestore(&device_domain_lock, flags);
1621 }
1622
1623found_domain:
1624 info = alloc_devinfo_mem();
1625 if (!info)
1626 goto error;
1627 info->bus = pdev->bus->number;
1628 info->devfn = pdev->devfn;
1629 info->dev = pdev;
1630 info->domain = domain;
1631 spin_lock_irqsave(&device_domain_lock, flags);
1632	/* somebody else may have set up the domain while we worked; recheck */
1633 found = find_domain(pdev);
1634 if (found != NULL) {
1635 spin_unlock_irqrestore(&device_domain_lock, flags);
1636 if (found != domain) {
1637 domain_exit(domain);
1638 domain = found;
1639 }
1640 free_devinfo_mem(info);
1641 return domain;
1642 }
1643 list_add(&info->link, &domain->devices);
1644 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001645 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646 spin_unlock_irqrestore(&device_domain_lock, flags);
1647 return domain;
1648error:
1649 /* recheck it here, maybe others set it */
1650 return find_domain(pdev);
1651}
1652
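/*
 * Identity-map [start, end) into the device's DMA-remapping domain so the
 * device can keep using bus addresses equal to physical addresses (used for
 * RMRR regions and the legacy workarounds below): reserve the IOVA range,
 * clear any stale PTEs, install the 1:1 mapping, then set up the device's
 * context entry.
 */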
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001653static int iommu_prepare_identity_map(struct pci_dev *pdev,
1654 unsigned long long start,
1655 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001656{
1657 struct dmar_domain *domain;
1658 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001659 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660 int ret;
1661
1662 printk(KERN_INFO
1663 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1664 pci_name(pdev), start, end);
1665 /* page table init */
1666 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1667 if (!domain)
1668 return -ENOMEM;
1669
1670 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001671 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001672 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001673 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1675 IOVA_PFN(base + size) - 1)) {
1676 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1677 ret = -ENOMEM;
1678 goto error;
1679 }
1680
1681 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1682 size, base, pci_name(pdev));
1683 /*
1684	 * The RMRR range might overlap with the physical memory range;
1685	 * clear it first
1686 */
1687 dma_pte_clear_range(domain, base, base + size);
1688
1689 ret = domain_page_mapping(domain, base, base, size,
1690 DMA_PTE_READ|DMA_PTE_WRITE);
1691 if (ret)
1692 goto error;
1693
1694 /* context entry init */
1695 ret = domain_context_mapping(domain, pdev);
1696 if (!ret)
1697 return 0;
1698error:
1699 domain_exit(domain);
1700 return ret;
1701
1702}
1703
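/*
 * Set up the unity mapping requested by a device's RMRR (Reserved Memory
 * Region Reporting) entry; rmrr->end_address is inclusive, hence the +1.
 */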
1704static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1705 struct pci_dev *pdev)
1706{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001707 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001708 return 0;
1709 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1710 rmrr->end_address + 1);
1711}
1712
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001713#ifdef CONFIG_DMAR_GFX_WA
Yinghai Lud52d53b2008-06-16 20:10:55 -07001714struct iommu_prepare_data {
1715 struct pci_dev *pdev;
1716 int ret;
1717};
1718
1719static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1720 unsigned long end_pfn, void *datax)
1721{
1722 struct iommu_prepare_data *data;
1723
1724 data = (struct iommu_prepare_data *)datax;
1725
1726 data->ret = iommu_prepare_identity_map(data->pdev,
1727 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1728 return data->ret;
1729
1730}
1731
1732static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1733{
1734 int nid;
1735 struct iommu_prepare_data data;
1736
1737 data.pdev = pdev;
1738 data.ret = 0;
1739
1740 for_each_online_node(nid) {
1741 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1742 if (data.ret)
1743 return data.ret;
1744 }
1745 return data.ret;
1746}
1747
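/*
 * CONFIG_DMAR_GFX_WA: give graphics devices a 1:1 mapping of all active
 * memory regions, so drivers that do not go through the DMA API keep
 * working with translation enabled.
 */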
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001748static void __init iommu_prepare_gfx_mapping(void)
1749{
1750 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001751 int ret;
1752
1753 for_each_pci_dev(pdev) {
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001754 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001755 !IS_GFX_DEVICE(pdev))
1756 continue;
1757 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1758 pci_name(pdev));
Yinghai Lud52d53b2008-06-16 20:10:55 -07001759 ret = iommu_prepare_with_active_regions(pdev);
1760 if (ret)
1761 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001762 }
1763}
Mark McLoughlin2abd7e12008-11-20 15:49:50 +00001764#else /* !CONFIG_DMAR_GFX_WA */
1765static inline void iommu_prepare_gfx_mapping(void)
1766{
1767 return;
1768}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001769#endif
1770
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001771#ifdef CONFIG_DMAR_FLOPPY_WA
1772static inline void iommu_prepare_isa(void)
1773{
1774 struct pci_dev *pdev;
1775 int ret;
1776
1777 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1778 if (!pdev)
1779 return;
1780
1781 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1782 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1783
1784 if (ret)
1785 printk("IOMMU: Failed to create 0-64M identity map, "
1786 "floppy might not work\n");
1787
1788}
1789#else
1790static inline void iommu_prepare_isa(void)
1791{
1792 return;
1793}
1794#endif /* !CONFIG_DMAR_FLOPPY_WA */
1795
Mark McLoughlin519a0542008-11-20 14:21:13 +00001796static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001797{
1798 struct dmar_drhd_unit *drhd;
1799 struct dmar_rmrr_unit *rmrr;
1800 struct pci_dev *pdev;
1801 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07001802 int i, ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001803
1804 /*
1805 * for each drhd
1806 * allocate root
1807 * initialize and program root entry to not present
1808 * endfor
1809 */
1810 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08001811 g_num_of_iommus++;
1812 /*
1813		 * lock not needed as this is only incremented in the
1814		 * single-threaded kernel __init code path; all other
1815		 * accesses are read only
1816 */
1817 }
1818
Weidong Hand9630fe2008-12-08 11:06:32 +08001819 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1820 GFP_KERNEL);
1821 if (!g_iommus) {
1822 printk(KERN_ERR "Allocating global iommu array failed\n");
1823 ret = -ENOMEM;
1824 goto error;
1825 }
1826
mark gross80b20dd2008-04-18 13:53:58 -07001827 deferred_flush = kzalloc(g_num_of_iommus *
1828 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1829 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08001830 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08001831 ret = -ENOMEM;
1832 goto error;
1833 }
1834
mark gross5e0d2a62008-03-04 15:22:08 -08001835 for_each_drhd_unit(drhd) {
1836 if (drhd->ignored)
1837 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001838
1839 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08001840 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001841
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001842 ret = iommu_init_domains(iommu);
1843 if (ret)
1844 goto error;
1845
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001846 /*
1847 * TBD:
1848 * we could share the same root & context tables
1849		 * among all IOMMUs. Need to split it later.
1850 */
1851 ret = iommu_alloc_root_entry(iommu);
1852 if (ret) {
1853 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1854 goto error;
1855 }
1856 }
1857
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001858 /*
1859	 * Start from a sane IOMMU hardware state.
1860 */
Youquan Songa77b67d2008-10-16 16:31:56 -07001861 for_each_drhd_unit(drhd) {
1862 if (drhd->ignored)
1863 continue;
1864
1865 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07001866
1867 /*
1868 * If the queued invalidation is already initialized by us
1869 * (for example, while enabling interrupt-remapping) then
1870 * we got the things already rolling from a sane state.
1871 */
1872 if (iommu->qi)
1873 continue;
1874
1875 /*
1876 * Clear any previous faults.
1877 */
1878 dmar_fault(-1, iommu);
1879 /*
1880 * Disable queued invalidation if supported and already enabled
1881 * before OS handover.
1882 */
1883 dmar_disable_qi(iommu);
1884 }
1885
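	/*
	 * Pick an invalidation method per IOMMU: use queued invalidation
	 * when the hardware supports it, otherwise fall back to
	 * register-based invalidation.
	 */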
1886 for_each_drhd_unit(drhd) {
1887 if (drhd->ignored)
1888 continue;
1889
1890 iommu = drhd->iommu;
1891
Youquan Songa77b67d2008-10-16 16:31:56 -07001892 if (dmar_enable_qi(iommu)) {
1893 /*
1894 * Queued Invalidate not enabled, use Register Based
1895 * Invalidate
1896 */
1897 iommu->flush.flush_context = __iommu_flush_context;
1898 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1899 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09001900 "invalidation\n",
1901 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07001902 } else {
1903 iommu->flush.flush_context = qi_flush_context;
1904 iommu->flush.flush_iotlb = qi_flush_iotlb;
1905 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09001906 "invalidation\n",
1907 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07001908 }
1909 }
1910
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001911 /*
1912 * For each rmrr
1913 * for each dev attached to rmrr
1914 * do
1915 * locate drhd for dev, alloc domain for dev
1916 * allocate free domain
1917 * allocate page table entries for rmrr
1918 * if context not allocated for bus
1919 * allocate and init context
1920 * set present in root table for this bus
1921 * init context with domain, translation etc
1922 * endfor
1923 * endfor
1924 */
1925 for_each_rmrr_units(rmrr) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001926 for (i = 0; i < rmrr->devices_cnt; i++) {
1927 pdev = rmrr->devices[i];
1928			/* some BIOSes list nonexistent devices in the DMAR table */
1929 if (!pdev)
1930 continue;
1931 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
1932 if (ret)
1933 printk(KERN_ERR
1934 "IOMMU: mapping reserved region failed\n");
1935 }
1936 }
1937
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001938 iommu_prepare_gfx_mapping();
1939
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001940 iommu_prepare_isa();
1941
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001942 /*
1943 * for each drhd
1944 * enable fault log
1945 * global invalidate context cache
1946 * global invalidate iotlb
1947 * enable translation
1948 */
1949 for_each_drhd_unit(drhd) {
1950 if (drhd->ignored)
1951 continue;
1952 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001953
1954 iommu_flush_write_buffer(iommu);
1955
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001956 ret = dmar_set_interrupt(iommu);
1957 if (ret)
1958 goto error;
1959
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001960 iommu_set_root_entry(iommu);
1961
Youquan Songa77b67d2008-10-16 16:31:56 -07001962 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
1963 0);
1964 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
1965 0);
mark grossf8bab732008-02-08 04:18:38 -08001966 iommu_disable_protect_mem_regions(iommu);
1967
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001968 ret = iommu_enable_translation(iommu);
1969 if (ret)
1970 goto error;
1971 }
1972
1973 return 0;
1974error:
1975 for_each_drhd_unit(drhd) {
1976 if (drhd->ignored)
1977 continue;
1978 iommu = drhd->iommu;
1979 free_iommu(iommu);
1980 }
Weidong Hand9630fe2008-12-08 11:06:32 +08001981 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001982 return ret;
1983}
1984
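/*
 * Return the buffer length rounded up so that [host_addr, host_addr + size)
 * is covered by whole pages, taking the offset within the first page into
 * account.
 */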
1985static inline u64 aligned_size(u64 host_addr, size_t size)
1986{
1987 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001988 addr = (host_addr & (~PAGE_MASK)) + size;
1989 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001990}
1991
1992struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07001993iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001994{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001995 struct iova *piova;
1996
1997 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001998 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07001999 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002000 return NULL;
2001
2002 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002003 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004 return piova;
2005}
2006
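/*
 * Allocate an IOVA range for the device.  Devices with a DMA mask at or
 * below 32 bits (or when forcedac is set) allocate directly against their
 * own mask; otherwise the 32-bit space is tried first and the full mask is
 * used only as a fallback.
 */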
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002007static struct iova *
2008__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002009 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002010{
2011 struct pci_dev *pdev = to_pci_dev(dev);
2012 struct iova *iova = NULL;
2013
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002014 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
2015 iova = iommu_alloc_iova(domain, size, dma_mask);
2016 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002017 /*
2018 * First try to allocate an io virtual address in
2019 * DMA_32BIT_MASK and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002020 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002021 */
2022 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
2023 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002024 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002025 }
2026
2027 if (!iova) {
2028 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
2029 return NULL;
2030 }
2031
2032 return iova;
2033}
2034
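/*
 * Return the device's DMA-remapping domain, allocating it and setting up
 * the context-entry mapping on first use.
 */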
2035static struct dmar_domain *
2036get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002037{
2038 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002039 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002040
2041 domain = get_domain_for_dev(pdev,
2042 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2043 if (!domain) {
2044 printk(KERN_ERR
2045 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002046 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002047 }
2048
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002049 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002050 if (unlikely(!domain_context_mapped(pdev))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002051 ret = domain_context_mapping(domain, pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002052 if (ret) {
2053 printk(KERN_ERR
2054 "Domain context map for %s failed",
2055 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002056 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002057 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002058 }
2059
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002060 return domain;
2061}
2062
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002063static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2064 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002065{
2066 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002067 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002068 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002069 struct iova *iova;
2070 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002071 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002072 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002073
2074 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002075 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002076 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002077
2078 domain = get_valid_domain_for_dev(pdev);
2079 if (!domain)
2080 return 0;
2081
Weidong Han8c11e792008-12-08 15:29:22 +08002082 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002083 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002084
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002085 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002086 if (!iova)
2087 goto error;
2088
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002089 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002090
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002091 /*
2092	 * Check if DMAR supports zero-length reads on write-only
2093	 * mappings.
2094 */
2095 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002096 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002097 prot |= DMA_PTE_READ;
2098 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2099 prot |= DMA_PTE_WRITE;
2100 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002101	 * paddr..(paddr + size) might span a partial page, so map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002102	 * page.  Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002103	 * might have two guest addresses mapping to the same host paddr,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002104	 * but this is not a big problem
2105 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002106 ret = domain_page_mapping(domain, start_paddr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002107 ((u64)paddr) & PAGE_MASK, size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002108 if (ret)
2109 goto error;
2110
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002111 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002112 ret = iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002113 start_paddr, size >> VTD_PAGE_SHIFT, 1);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002114 if (ret)
Weidong Han8c11e792008-12-08 15:29:22 +08002115 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002116
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002117 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002118
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002119error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002120 if (iova)
2121 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002122 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002123 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002124 return 0;
2125}
2126
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002127static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2128 unsigned long offset, size_t size,
2129 enum dma_data_direction dir,
2130 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002131{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002132 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2133 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002134}
2135
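/*
 * Flush all deferred unmaps: do one global IOTLB flush per IOMMU that has
 * pending entries and then release the corresponding IOVAs.  Called with
 * async_umap_flush_lock held.
 */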
mark gross5e0d2a62008-03-04 15:22:08 -08002136static void flush_unmaps(void)
2137{
mark gross80b20dd2008-04-18 13:53:58 -07002138 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002139
mark gross5e0d2a62008-03-04 15:22:08 -08002140 timer_on = 0;
2141
2142 /* just flush them all */
2143 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002144 struct intel_iommu *iommu = g_iommus[i];
2145 if (!iommu)
2146 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002147
Weidong Hana2bb8452008-12-08 11:24:12 +08002148 if (deferred_flush[i].next) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002149 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2150 DMA_TLB_GLOBAL_FLUSH, 0);
mark gross80b20dd2008-04-18 13:53:58 -07002151 for (j = 0; j < deferred_flush[i].next; j++) {
2152 __free_iova(&deferred_flush[i].domain[j]->iovad,
2153 deferred_flush[i].iova[j]);
2154 }
2155 deferred_flush[i].next = 0;
2156 }
mark gross5e0d2a62008-03-04 15:22:08 -08002157 }
2158
mark gross5e0d2a62008-03-04 15:22:08 -08002159 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002160}
2161
2162static void flush_unmaps_timeout(unsigned long data)
2163{
mark gross80b20dd2008-04-18 13:53:58 -07002164 unsigned long flags;
2165
2166 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002167 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002168 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002169}
2170
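/*
 * Queue an IOVA for deferred freeing instead of flushing the IOTLB on every
 * unmap; the batch is drained when the high-water mark is reached or when
 * the 10ms timer fires.
 */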
2171static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2172{
2173 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002174 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002175 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002176
2177 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002178 if (list_size == HIGH_WATER_MARK)
2179 flush_unmaps();
2180
Weidong Han8c11e792008-12-08 15:29:22 +08002181 iommu = domain_get_iommu(dom);
2182 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002183
mark gross80b20dd2008-04-18 13:53:58 -07002184 next = deferred_flush[iommu_id].next;
2185 deferred_flush[iommu_id].domain[next] = dom;
2186 deferred_flush[iommu_id].iova[next] = iova;
2187 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002188
2189 if (!timer_on) {
2190 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2191 timer_on = 1;
2192 }
2193 list_size++;
2194 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2195}
2196
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002197static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2198 size_t size, enum dma_data_direction dir,
2199 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002200{
2201 struct pci_dev *pdev = to_pci_dev(dev);
2202 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002203 unsigned long start_addr;
2204 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002205 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002206
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002207 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002208 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002209 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002210 BUG_ON(!domain);
2211
Weidong Han8c11e792008-12-08 15:29:22 +08002212 iommu = domain_get_iommu(domain);
2213
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002214 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2215 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002216 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002217
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002218 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002219 size = aligned_size((u64)dev_addr, size);
2220
2221 pr_debug("Device %s unmapping: %lx@%llx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002222 pci_name(pdev), size, (unsigned long long)start_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002223
2224 /* clear the whole page */
2225 dma_pte_clear_range(domain, start_addr, start_addr + size);
2226 /* free page tables */
2227 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
mark gross5e0d2a62008-03-04 15:22:08 -08002228 if (intel_iommu_strict) {
Weidong Han8c11e792008-12-08 15:29:22 +08002229 if (iommu_flush_iotlb_psi(iommu,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002230 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002231 iommu_flush_write_buffer(iommu);
mark gross5e0d2a62008-03-04 15:22:08 -08002232 /* free iova */
2233 __free_iova(&domain->iovad, iova);
2234 } else {
2235 add_unmap(domain, iova);
2236 /*
2237		 * queue up the release of the unmap to save the roughly 1/6th
2238		 * of the CPU time otherwise spent on per-unmap iotlb flushes...
2239 */
mark gross5e0d2a62008-03-04 15:22:08 -08002240 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002241}
2242
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002243static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2244 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002245{
2246 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2247}
2248
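/*
 * Coherent allocations: grab zeroed pages from the page allocator and map
 * them bidirectionally through the IOMMU against the device's coherent DMA
 * mask.
 */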
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002249static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2250 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002251{
2252 void *vaddr;
2253 int order;
2254
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002255 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002256 order = get_order(size);
2257 flags &= ~(GFP_DMA | GFP_DMA32);
2258
2259 vaddr = (void *)__get_free_pages(flags, order);
2260 if (!vaddr)
2261 return NULL;
2262 memset(vaddr, 0, size);
2263
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002264 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2265 DMA_BIDIRECTIONAL,
2266 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002267 if (*dma_handle)
2268 return vaddr;
2269 free_pages((unsigned long)vaddr, order);
2270 return NULL;
2271}
2272
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002273static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2274 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002275{
2276 int order;
2277
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002278 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002279 order = get_order(size);
2280
2281 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2282 free_pages((unsigned long)vaddr, order);
2283}
2284
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002285#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002286
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002287static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2288 int nelems, enum dma_data_direction dir,
2289 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002290{
2291 int i;
2292 struct pci_dev *pdev = to_pci_dev(hwdev);
2293 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002294 unsigned long start_addr;
2295 struct iova *iova;
2296 size_t size = 0;
2297 void *addr;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002298 struct scatterlist *sg;
Weidong Han8c11e792008-12-08 15:29:22 +08002299 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002300
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002301 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002302 return;
2303
2304 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002305 BUG_ON(!domain);
2306
2307 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002308
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002309 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002310 if (!iova)
2311 return;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002312 for_each_sg(sglist, sg, nelems, i) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002313 addr = SG_ENT_VIRT_ADDRESS(sg);
2314 size += aligned_size((u64)addr, sg->length);
2315 }
2316
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002317 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002318
2319 /* clear the whole page */
2320 dma_pte_clear_range(domain, start_addr, start_addr + size);
2321 /* free page tables */
2322 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2323
Weidong Han8c11e792008-12-08 15:29:22 +08002324 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002325 size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002326 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002327
2328 /* free iova */
2329 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002330}
2331
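/*
 * Scatterlist mapping for devices that bypass DMA remapping (i.e. devices
 * marked with the dummy domain info): just use the bus address of each
 * segment directly.
 */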
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002332static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002333 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002334{
2335 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002336 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002337
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002338 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002339 BUG_ON(!sg_page(sg));
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002340 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
2341 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002342 }
2343 return nelems;
2344}
2345
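/*
 * Map a scatterlist: allocate one IOVA range large enough for all segments,
 * map each segment contiguously into that range, and finish with a single
 * IOTLB flush.
 */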
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002346static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2347 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002348{
2349 void *addr;
2350 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002351 struct pci_dev *pdev = to_pci_dev(hwdev);
2352 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002353 size_t size = 0;
2354 int prot = 0;
2355 size_t offset = 0;
2356 struct iova *iova = NULL;
2357 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002358 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002359 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002360 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002361
2362 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002363 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002364 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002365
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002366 domain = get_valid_domain_for_dev(pdev);
2367 if (!domain)
2368 return 0;
2369
Weidong Han8c11e792008-12-08 15:29:22 +08002370 iommu = domain_get_iommu(domain);
2371
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002372 for_each_sg(sglist, sg, nelems, i) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002373 addr = SG_ENT_VIRT_ADDRESS(sg);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002374 addr = (void *)virt_to_phys(addr);
2375 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002376 }
2377
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002378 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002379 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002380 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002381 return 0;
2382 }
2383
2384 /*
2385 * Check if DMAR supports zero-length reads on write only
2386 * mappings..
2387 */
2388 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002389 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002390 prot |= DMA_PTE_READ;
2391 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2392 prot |= DMA_PTE_WRITE;
2393
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002394 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002395 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002396 for_each_sg(sglist, sg, nelems, i) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002397 addr = SG_ENT_VIRT_ADDRESS(sg);
2398 addr = (void *)virt_to_phys(addr);
2399 size = aligned_size((u64)addr, sg->length);
2400 ret = domain_page_mapping(domain, start_addr + offset,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002401 ((u64)addr) & PAGE_MASK,
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002402 size, prot);
2403 if (ret) {
2404 /* clear the page */
2405 dma_pte_clear_range(domain, start_addr,
2406 start_addr + offset);
2407 /* free page tables */
2408 dma_pte_free_pagetable(domain, start_addr,
2409 start_addr + offset);
2410 /* free iova */
2411 __free_iova(&domain->iovad, iova);
2412 return 0;
2413 }
2414 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002415 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002416 sg->dma_length = sg->length;
2417 offset += size;
2418 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002419
2420 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002421 if (iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002422 start_addr, offset >> VTD_PAGE_SHIFT, 1))
Weidong Han8c11e792008-12-08 15:29:22 +08002423 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002424 return nelems;
2425}
2426
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002427static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2428{
2429 return !dma_addr;
2430}
2431
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002432struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002433 .alloc_coherent = intel_alloc_coherent,
2434 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002435 .map_sg = intel_map_sg,
2436 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002437 .map_page = intel_map_page,
2438 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002439 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002440};
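/*
 * Usage sketch (not part of this file): drivers never call these ops
 * directly; they go through the generic DMA API, which dispatches to
 * intel_dma_ops once it is installed as dma_ops, e.g.:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -EIO;
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * dma_map_single() ends up in intel_map_page() above, and
 * dma_unmap_single() in intel_unmap_page().
 */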
2441
2442static inline int iommu_domain_cache_init(void)
2443{
2444 int ret = 0;
2445
2446 iommu_domain_cache = kmem_cache_create("iommu_domain",
2447 sizeof(struct dmar_domain),
2448 0,
2449 SLAB_HWCACHE_ALIGN,
2450
2451 NULL);
2452 if (!iommu_domain_cache) {
2453 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2454 ret = -ENOMEM;
2455 }
2456
2457 return ret;
2458}
2459
2460static inline int iommu_devinfo_cache_init(void)
2461{
2462 int ret = 0;
2463
2464 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2465 sizeof(struct device_domain_info),
2466 0,
2467 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002468 NULL);
2469 if (!iommu_devinfo_cache) {
2470 printk(KERN_ERR "Couldn't create devinfo cache\n");
2471 ret = -ENOMEM;
2472 }
2473
2474 return ret;
2475}
2476
2477static inline int iommu_iova_cache_init(void)
2478{
2479 int ret = 0;
2480
2481 iommu_iova_cache = kmem_cache_create("iommu_iova",
2482 sizeof(struct iova),
2483 0,
2484 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002485 NULL);
2486 if (!iommu_iova_cache) {
2487 printk(KERN_ERR "Couldn't create iova cache\n");
2488 ret = -ENOMEM;
2489 }
2490
2491 return ret;
2492}
2493
2494static int __init iommu_init_mempool(void)
2495{
2496 int ret;
2497 ret = iommu_iova_cache_init();
2498 if (ret)
2499 return ret;
2500
2501 ret = iommu_domain_cache_init();
2502 if (ret)
2503 goto domain_error;
2504
2505 ret = iommu_devinfo_cache_init();
2506 if (!ret)
2507 return ret;
2508
2509 kmem_cache_destroy(iommu_domain_cache);
2510domain_error:
2511 kmem_cache_destroy(iommu_iova_cache);
2512
2513 return -ENOMEM;
2514}
2515
2516static void __init iommu_exit_mempool(void)
2517{
2518 kmem_cache_destroy(iommu_devinfo_cache);
2519 kmem_cache_destroy(iommu_domain_cache);
2520 kmem_cache_destroy(iommu_iova_cache);
2521
2522}
2523
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002524static void __init init_no_remapping_devices(void)
2525{
2526 struct dmar_drhd_unit *drhd;
2527
2528 for_each_drhd_unit(drhd) {
2529 if (!drhd->include_all) {
2530 int i;
2531 for (i = 0; i < drhd->devices_cnt; i++)
2532 if (drhd->devices[i] != NULL)
2533 break;
2534 /* ignore DMAR unit if no pci devices exist */
2535 if (i == drhd->devices_cnt)
2536 drhd->ignored = 1;
2537 }
2538 }
2539
2540 if (dmar_map_gfx)
2541 return;
2542
2543 for_each_drhd_unit(drhd) {
2544 int i;
2545 if (drhd->ignored || drhd->include_all)
2546 continue;
2547
2548 for (i = 0; i < drhd->devices_cnt; i++)
2549 if (drhd->devices[i] &&
2550 !IS_GFX_DEVICE(drhd->devices[i]))
2551 break;
2552
2553 if (i < drhd->devices_cnt)
2554 continue;
2555
2556 /* bypass IOMMU if it is just for gfx devices */
2557 drhd->ignored = 1;
2558 for (i = 0; i < drhd->devices_cnt; i++) {
2559 if (!drhd->devices[i])
2560 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002561 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002562 }
2563 }
2564}
2565
2566int __init intel_iommu_init(void)
2567{
2568 int ret = 0;
2569
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002570 if (dmar_table_init())
2571 return -ENODEV;
2572
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002573 if (dmar_dev_scope_init())
2574 return -ENODEV;
2575
Suresh Siddha2ae21012008-07-10 11:16:43 -07002576 /*
2577 * Check the need for DMA-remapping initialization now.
2578 * Above initialization will also be used by Interrupt-remapping.
2579 */
2580 if (no_iommu || swiotlb || dmar_disabled)
2581 return -ENODEV;
2582
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002583 iommu_init_mempool();
2584 dmar_init_reserved_ranges();
2585
2586 init_no_remapping_devices();
2587
2588 ret = init_dmars();
2589 if (ret) {
2590 printk(KERN_ERR "IOMMU: dmar init failed\n");
2591 put_iova_domain(&reserved_iova_list);
2592 iommu_exit_mempool();
2593 return ret;
2594 }
2595 printk(KERN_INFO
2596 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2597
mark gross5e0d2a62008-03-04 15:22:08 -08002598 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002599 force_iommu = 1;
2600 dma_ops = &intel_dma_ops;
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01002601
2602 register_iommu(&intel_iommu_ops);
2603
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002604 return 0;
2605}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002606
Weidong Hanc7151a82008-12-08 22:51:37 +08002607static int vm_domain_add_dev_info(struct dmar_domain *domain,
2608 struct pci_dev *pdev)
2609{
2610 struct device_domain_info *info;
2611 unsigned long flags;
2612
2613 info = alloc_devinfo_mem();
2614 if (!info)
2615 return -ENOMEM;
2616
2617 info->bus = pdev->bus->number;
2618 info->devfn = pdev->devfn;
2619 info->dev = pdev;
2620 info->domain = domain;
2621
2622 spin_lock_irqsave(&device_domain_lock, flags);
2623 list_add(&info->link, &domain->devices);
2624 list_add(&info->global, &device_domain_list);
2625 pdev->dev.archdata.iommu = info;
2626 spin_unlock_irqrestore(&device_domain_lock, flags);
2627
2628 return 0;
2629}
2630
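/*
 * Detach one device from a virtual-machine domain: remove its
 * device_domain_info, tear down its context entry, and, if no other device
 * behind the same IOMMU remains in the domain, drop that IOMMU from the
 * domain's bitmap and recompute the coherency attribute.
 */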
2631static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2632 struct pci_dev *pdev)
2633{
2634 struct device_domain_info *info;
2635 struct intel_iommu *iommu;
2636 unsigned long flags;
2637 int found = 0;
2638 struct list_head *entry, *tmp;
2639
2640 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
2641 if (!iommu)
2642 return;
2643
2644 spin_lock_irqsave(&device_domain_lock, flags);
2645 list_for_each_safe(entry, tmp, &domain->devices) {
2646 info = list_entry(entry, struct device_domain_info, link);
2647 if (info->bus == pdev->bus->number &&
2648 info->devfn == pdev->devfn) {
2649 list_del(&info->link);
2650 list_del(&info->global);
2651 if (info->dev)
2652 info->dev->dev.archdata.iommu = NULL;
2653 spin_unlock_irqrestore(&device_domain_lock, flags);
2654
2655 iommu_detach_dev(iommu, info->bus, info->devfn);
2656 free_devinfo_mem(info);
2657
2658 spin_lock_irqsave(&device_domain_lock, flags);
2659
2660 if (found)
2661 break;
2662 else
2663 continue;
2664 }
2665
2666		/* if there are no other devices under the same iommu
2667		 * owned by this domain, clear this iommu in iommu_bmp,
2668		 * and update the iommu count and coherency
2669 */
2670 if (device_to_iommu(info->bus, info->devfn) == iommu)
2671 found = 1;
2672 }
2673
2674 if (found == 0) {
2675 unsigned long tmp_flags;
2676 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2677 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2678 domain->iommu_count--;
2679 domain_update_iommu_coherency(domain);
2680 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2681 }
2682
2683 spin_unlock_irqrestore(&device_domain_lock, flags);
2684}
2685
2686static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2687{
2688 struct device_domain_info *info;
2689 struct intel_iommu *iommu;
2690 unsigned long flags1, flags2;
2691
2692 spin_lock_irqsave(&device_domain_lock, flags1);
2693 while (!list_empty(&domain->devices)) {
2694 info = list_entry(domain->devices.next,
2695 struct device_domain_info, link);
2696 list_del(&info->link);
2697 list_del(&info->global);
2698 if (info->dev)
2699 info->dev->dev.archdata.iommu = NULL;
2700
2701 spin_unlock_irqrestore(&device_domain_lock, flags1);
2702
2703 iommu = device_to_iommu(info->bus, info->devfn);
2704 iommu_detach_dev(iommu, info->bus, info->devfn);
2705
2706 /* clear this iommu in iommu_bmp, update iommu count
2707 * and coherency
2708 */
2709 spin_lock_irqsave(&domain->iommu_lock, flags2);
2710 if (test_and_clear_bit(iommu->seq_id,
2711 &domain->iommu_bmp)) {
2712 domain->iommu_count--;
2713 domain_update_iommu_coherency(domain);
2714 }
2715 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2716
2717 free_devinfo_mem(info);
2718 spin_lock_irqsave(&device_domain_lock, flags1);
2719 }
2720 spin_unlock_irqrestore(&device_domain_lock, flags1);
2721}
2722
Weidong Han5e98c4b2008-12-08 23:03:27 +08002723/* domain id for virtual machine domains; it is never written into a context entry */
2724static unsigned long vm_domid;
2725
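/*
 * Return the smallest adjusted guest address width supported by any IOMMU
 * this domain currently spans; it bounds the highest address the domain may
 * map.
 */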
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002726static int vm_domain_min_agaw(struct dmar_domain *domain)
2727{
2728 int i;
2729 int min_agaw = domain->agaw;
2730
2731 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
2732 for (; i < g_num_of_iommus; ) {
2733 if (min_agaw > g_iommus[i]->agaw)
2734 min_agaw = g_iommus[i]->agaw;
2735
2736 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
2737 }
2738
2739 return min_agaw;
2740}
2741
Weidong Han5e98c4b2008-12-08 23:03:27 +08002742static struct dmar_domain *iommu_alloc_vm_domain(void)
2743{
2744 struct dmar_domain *domain;
2745
2746 domain = alloc_domain_mem();
2747 if (!domain)
2748 return NULL;
2749
2750 domain->id = vm_domid++;
2751 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
2752 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
2753
2754 return domain;
2755}
2756
2757static int vm_domain_init(struct dmar_domain *domain, int guest_width)
2758{
2759 int adjust_width;
2760
2761 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
2762 spin_lock_init(&domain->mapping_lock);
2763 spin_lock_init(&domain->iommu_lock);
2764
2765 domain_reserve_special_ranges(domain);
2766
2767 /* calculate AGAW */
2768 domain->gaw = guest_width;
2769 adjust_width = guestwidth_to_adjustwidth(guest_width);
2770 domain->agaw = width_to_agaw(adjust_width);
2771
2772 INIT_LIST_HEAD(&domain->devices);
2773
2774 domain->iommu_count = 0;
2775 domain->iommu_coherency = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002776 domain->max_addr = 0;
Weidong Han5e98c4b2008-12-08 23:03:27 +08002777
2778 /* always allocate the top pgd */
2779 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
2780 if (!domain->pgd)
2781 return -ENOMEM;
2782 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
2783 return 0;
2784}
2785
2786static void iommu_free_vm_domain(struct dmar_domain *domain)
2787{
2788 unsigned long flags;
2789 struct dmar_drhd_unit *drhd;
2790 struct intel_iommu *iommu;
2791 unsigned long i;
2792 unsigned long ndomains;
2793
2794 for_each_drhd_unit(drhd) {
2795 if (drhd->ignored)
2796 continue;
2797 iommu = drhd->iommu;
2798
2799 ndomains = cap_ndoms(iommu->cap);
2800 i = find_first_bit(iommu->domain_ids, ndomains);
2801 for (; i < ndomains; ) {
2802 if (iommu->domains[i] == domain) {
2803 spin_lock_irqsave(&iommu->lock, flags);
2804 clear_bit(i, iommu->domain_ids);
2805 iommu->domains[i] = NULL;
2806 spin_unlock_irqrestore(&iommu->lock, flags);
2807 break;
2808 }
2809 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
2810 }
2811 }
2812}
2813
2814static void vm_domain_exit(struct dmar_domain *domain)
2815{
2816 u64 end;
2817
2818	/* Domain 0 is reserved, so don't process it */
2819 if (!domain)
2820 return;
2821
2822 vm_domain_remove_all_dev_info(domain);
2823 /* destroy iovas */
2824 put_iova_domain(&domain->iovad);
2825 end = DOMAIN_MAX_ADDR(domain->gaw);
2826 end = end & (~VTD_PAGE_MASK);
2827
2828 /* clear ptes */
2829 dma_pte_clear_range(domain, 0, end);
2830
2831 /* free page tables */
2832 dma_pte_free_pagetable(domain, 0, end);
2833
2834 iommu_free_vm_domain(domain);
2835 free_domain_mem(domain);
2836}
2837
Joerg Roedel5d450802008-12-03 14:52:32 +01002838static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03002839{
Joerg Roedel5d450802008-12-03 14:52:32 +01002840 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03002841
Joerg Roedel5d450802008-12-03 14:52:32 +01002842 dmar_domain = iommu_alloc_vm_domain();
2843 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03002844 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01002845 "intel_iommu_domain_init: dmar_domain == NULL\n");
2846 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03002847 }
Joerg Roedel5d450802008-12-03 14:52:32 +01002848 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03002849 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01002850 "intel_iommu_domain_init() failed\n");
2851 vm_domain_exit(dmar_domain);
2852 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03002853 }
Joerg Roedel5d450802008-12-03 14:52:32 +01002854 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002855
Joerg Roedel5d450802008-12-03 14:52:32 +01002856 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03002857}
Kay, Allen M38717942008-09-09 18:37:29 +03002858
Joerg Roedel5d450802008-12-03 14:52:32 +01002859static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03002860{
Joerg Roedel5d450802008-12-03 14:52:32 +01002861 struct dmar_domain *dmar_domain = domain->priv;
2862
2863 domain->priv = NULL;
2864 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03002865}
Kay, Allen M38717942008-09-09 18:37:29 +03002866
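/*
 * Attach a device to a virtual-machine domain on behalf of the generic
 * IOMMU API: detach it from any previous domain, check that the IOMMU
 * behind the device can address everything already mapped in the domain,
 * then set up the context entry and the bookkeeping.
 */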
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002867static int intel_iommu_attach_device(struct iommu_domain *domain,
2868 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03002869{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002870 struct dmar_domain *dmar_domain = domain->priv;
2871 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002872 struct intel_iommu *iommu;
2873 int addr_width;
2874 u64 end;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002875 int ret;
Kay, Allen M38717942008-09-09 18:37:29 +03002876
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002877 /* normally pdev is not mapped */
2878 if (unlikely(domain_context_mapped(pdev))) {
2879 struct dmar_domain *old_domain;
2880
2881 old_domain = find_domain(pdev);
2882 if (old_domain) {
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002883 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002884 vm_domain_remove_one_dev_info(old_domain, pdev);
2885 else
2886 domain_remove_dev_info(old_domain);
2887 }
2888 }
2889
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002890 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
2891 if (!iommu)
2892 return -ENODEV;
2893
2894 /* check if this iommu agaw is sufficient for max mapped address */
2895 addr_width = agaw_to_width(iommu->agaw);
2896 end = DOMAIN_MAX_ADDR(addr_width);
2897 end = end & VTD_PAGE_MASK;
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002898 if (end < dmar_domain->max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002899 printk(KERN_ERR "%s: iommu agaw (%d) is not "
2900 "sufficient for the mapped address (%llx)\n",
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002901 __func__, iommu->agaw, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002902 return -EFAULT;
2903 }
2904
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002905 ret = domain_context_mapping(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002906 if (ret)
2907 return ret;
2908
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002909 ret = vm_domain_add_dev_info(dmar_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002910 return ret;
2911}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002912
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002913static void intel_iommu_detach_device(struct iommu_domain *domain,
2914 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03002915{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01002916 struct dmar_domain *dmar_domain = domain->priv;
2917 struct pci_dev *pdev = to_pci_dev(dev);
2918
2919 vm_domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03002920}
Kay, Allen M38717942008-09-09 18:37:29 +03002921
Joerg Roedeldde57a22008-12-03 15:04:09 +01002922static int intel_iommu_map_range(struct iommu_domain *domain,
2923 unsigned long iova, phys_addr_t hpa,
2924 size_t size, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03002925{
Joerg Roedeldde57a22008-12-03 15:04:09 +01002926 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002927 u64 max_addr;
2928 int addr_width;
Joerg Roedeldde57a22008-12-03 15:04:09 +01002929 int prot = 0;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002930 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002931
Joerg Roedeldde57a22008-12-03 15:04:09 +01002932 if (iommu_prot & IOMMU_READ)
2933 prot |= DMA_PTE_READ;
2934 if (iommu_prot & IOMMU_WRITE)
2935 prot |= DMA_PTE_WRITE;
2936
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002937 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
Joerg Roedeldde57a22008-12-03 15:04:09 +01002938 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002939 int min_agaw;
2940 u64 end;
2941
2942 /* check if minimum agaw is sufficient for mapped address */
Joerg Roedeldde57a22008-12-03 15:04:09 +01002943 min_agaw = vm_domain_min_agaw(dmar_domain);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002944 addr_width = agaw_to_width(min_agaw);
2945 end = DOMAIN_MAX_ADDR(addr_width);
2946 end = end & VTD_PAGE_MASK;
2947 if (end < max_addr) {
2948 printk(KERN_ERR "%s: iommu agaw (%d) is not "
2949 "sufficient for the mapped address (%llx)\n",
2950 __func__, min_agaw, max_addr);
2951 return -EFAULT;
2952 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01002953 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002954 }
2955
Joerg Roedeldde57a22008-12-03 15:04:09 +01002956 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002957 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03002958}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002959
Joerg Roedeldde57a22008-12-03 15:04:09 +01002960static void intel_iommu_unmap_range(struct iommu_domain *domain,
2961 unsigned long iova, size_t size)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002962{
Joerg Roedeldde57a22008-12-03 15:04:09 +01002963 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002964 dma_addr_t base;
2965
2966 /* The address might not be aligned */
2967 base = iova & VTD_PAGE_MASK;
2968 size = VTD_PAGE_ALIGN(size);
Joerg Roedeldde57a22008-12-03 15:04:09 +01002969 dma_pte_clear_range(dmar_domain, base, base + size);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08002970
Joerg Roedeldde57a22008-12-03 15:04:09 +01002971 if (dmar_domain->max_addr == base + size)
2972 dmar_domain->max_addr = base;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002973}
Kay, Allen M38717942008-09-09 18:37:29 +03002974
Joerg Roedeld14d6572008-12-03 15:06:57 +01002975static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
2976 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03002977{
Joerg Roedeld14d6572008-12-03 15:06:57 +01002978 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03002979 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002980 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03002981
Joerg Roedeld14d6572008-12-03 15:06:57 +01002982 pte = addr_to_dma_pte(dmar_domain, iova);
Kay, Allen M38717942008-09-09 18:37:29 +03002983 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002984 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03002985
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08002986 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03002987}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01002988
2989static struct iommu_ops intel_iommu_ops = {
2990 .domain_init = intel_iommu_domain_init,
2991 .domain_destroy = intel_iommu_domain_destroy,
2992 .attach_dev = intel_iommu_attach_device,
2993 .detach_dev = intel_iommu_detach_device,
2994 .map = intel_iommu_map_range,
2995 .unmap = intel_iommu_unmap_range,
2996 .iova_to_phys = intel_iommu_iova_to_phys,
2997};
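/*
 * Usage sketch (assumption: the generic IOMMU API of this kernel, as used
 * by KVM device assignment) - a caller would do roughly:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (dom && !iommu_attach_device(dom, &pdev->dev))
 *		iommu_map_range(dom, gpa, hpa, size,
 *				IOMMU_READ | IOMMU_WRITE);
 *
 * which reaches intel_iommu_domain_init(), intel_iommu_attach_device() and
 * intel_iommu_map_range() through the ops table above.
 */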
David Woodhouse9af88142009-02-13 23:18:03 +00002998
2999static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3000{
3001 /*
3002 * Mobile 4 Series Chipset neglects to set RWBF capability,
3003 * but needs it:
3004 */
3005 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3006 rwbf_quirk = 1;
3007}
3008
3009DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);