Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
mark gross98bcef52008-02-23 15:23:35 -080017 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Fenghua Yu5b6985c2008-10-16 18:02:32 -070021 * Author: Fenghua Yu <fenghua.yu@intel.com>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070022 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
mark gross5e0d2a62008-03-04 15:22:08 -080026#include <linux/debugfs.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070027#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070030#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
mark gross5e0d2a62008-03-04 15:22:08 -080035#include <linux/timer.h>
Kay, Allen M38717942008-09-09 18:37:29 +030036#include <linux/iova.h>
37#include <linux/intel-iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070038#include <asm/cacheflush.h>
FUJITA Tomonori46a7fa22008-07-11 10:23:42 +090039#include <asm/iommu.h>
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070040#include "pci.h"
41
Fenghua Yu5b6985c2008-10-16 18:02:32 -070042#define ROOT_SIZE VTD_PAGE_SIZE
43#define CONTEXT_SIZE VTD_PAGE_SIZE
44
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070045#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
46#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
47
48#define IOAPIC_RANGE_START (0xfee00000)
49#define IOAPIC_RANGE_END (0xfeefffff)
50#define IOVA_START_ADDR (0x1000)
51
52#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
53
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -070054#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
55
Mark McLoughlinf27be032008-11-20 15:49:43 +000056#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
57#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
58#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
mark gross5e0d2a62008-03-04 15:22:08 -080059
Weidong Hand9630fe2008-12-08 11:06:32 +080060/* global iommu list, set NULL for ignored DMAR units */
61static struct intel_iommu **g_iommus;
62
Mark McLoughlin46b08e12008-11-20 15:49:44 +000063/*
64 * 0: Present
65 * 1-11: Reserved
66 * 12-63: Context Ptr (12 - (haw-1))
67 * 64-127: Reserved
68 */
69struct root_entry {
70 u64 val;
71 u64 rsvd1;
72};
73#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
74static inline bool root_present(struct root_entry *root)
75{
76 return (root->val & 1);
77}
78static inline void set_root_present(struct root_entry *root)
79{
80 root->val |= 1;
81}
82static inline void set_root_value(struct root_entry *root, unsigned long value)
83{
84 root->val |= value & VTD_PAGE_MASK;
85}
86
87static inline struct context_entry *
88get_context_addr_from_root(struct root_entry *root)
89{
90 return (struct context_entry *)
91 (root_present(root)?phys_to_virt(
92 root->val & VTD_PAGE_MASK) :
93 NULL);
94}
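/*
 * For reference: the root table holds ROOT_ENTRY_NR = 4096/16 = 256
 * entries, indexed by PCI bus number; each present entry points to a
 * 4KiB context table of 256 context entries indexed by devfn.  A
 * made-up device 0000:02:1f.2 would therefore resolve as
 * root_entry[2] -> context_table[PCI_DEVFN(0x1f, 2)] = entry 0xfa.
 */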
95
Mark McLoughlin7a8fc252008-11-20 15:49:45 +000096/*
97 * low 64 bits:
98 * 0: present
99 * 1: fault processing disable
100 * 2-3: translation type
101 * 12-63: address space root
102 * high 64 bits:
103 * 0-2: address width
104 * 3-6: available
105 * 8-23: domain id
106 */
107struct context_entry {
108 u64 lo;
109 u64 hi;
110};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000111
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000112static inline bool context_present(struct context_entry *context)
113{
114 return (context->lo & 1);
115}
116static inline void context_set_present(struct context_entry *context)
117{
118 context->lo |= 1;
119}
120
121static inline void context_set_fault_enable(struct context_entry *context)
122{
123 context->lo &= (((u64)-1) << 2) | 1;
124}
125
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000126#define CONTEXT_TT_MULTI_LEVEL 0
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000127
128static inline void context_set_translation_type(struct context_entry *context,
129 unsigned long value)
130{
131 context->lo &= (((u64)-1) << 4) | 3;
132 context->lo |= (value & 3) << 2;
133}
134
135static inline void context_set_address_root(struct context_entry *context,
136 unsigned long value)
137{
138 context->lo |= value & VTD_PAGE_MASK;
139}
140
141static inline void context_set_address_width(struct context_entry *context,
142 unsigned long value)
143{
144 context->hi |= value & 7;
145}
146
147static inline void context_set_domain_id(struct context_entry *context,
148 unsigned long value)
149{
150 context->hi |= (value & ((1 << 16) - 1)) << 8;
151}
152
153static inline void context_clear_entry(struct context_entry *context)
154{
155 context->lo = 0;
156 context->hi = 0;
157}
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000158
Mark McLoughlin622ba122008-11-20 15:49:46 +0000159/*
160 * 0: readable
161 * 1: writable
162 * 2-6: reserved
163 * 7: super page
164 * 8-11: available
165 * 12-63: Host physical address
166 */
167struct dma_pte {
168 u64 val;
169};
Mark McLoughlin622ba122008-11-20 15:49:46 +0000170
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000171static inline void dma_clear_pte(struct dma_pte *pte)
172{
173 pte->val = 0;
174}
175
176static inline void dma_set_pte_readable(struct dma_pte *pte)
177{
178 pte->val |= DMA_PTE_READ;
179}
180
181static inline void dma_set_pte_writable(struct dma_pte *pte)
182{
183 pte->val |= DMA_PTE_WRITE;
184}
185
186static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
187{
188 pte->val = (pte->val & ~3) | (prot & 3);
189}
190
191static inline u64 dma_pte_addr(struct dma_pte *pte)
192{
193 return (pte->val & VTD_PAGE_MASK);
194}
195
196static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
197{
198 pte->val |= (addr & VTD_PAGE_MASK);
199}
200
201static inline bool dma_pte_present(struct dma_pte *pte)
202{
203 return (pte->val & 3) != 0;
204}
Mark McLoughlin622ba122008-11-20 15:49:46 +0000205
Weidong Han3b5410e2008-12-08 09:17:15 +0800206/* devices under the same p2p bridge are owned in one domain */
207#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
208
Weidong Han1ce28fe2008-12-08 16:35:39 +0800209/* a domain can represent a virtual machine: devices spread across
210 * more than one iommu may be owned by the same domain, e.g. a kvm guest.
211 */
212#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
213
Mark McLoughlin99126f72008-11-20 15:49:47 +0000214struct dmar_domain {
215 int id; /* domain id */
Weidong Han8c11e792008-12-08 15:29:22 +0800216 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
Mark McLoughlin99126f72008-11-20 15:49:47 +0000217
218 struct list_head devices; /* all devices' list */
219 struct iova_domain iovad; /* iova's that belong to this domain */
220
221 struct dma_pte *pgd; /* virtual address */
222 spinlock_t mapping_lock; /* page table lock */
223 int gaw; /* max guest address width */
224
225 /* adjusted guest address width, 0 is level 2 30-bit */
226 int agaw;
227
Weidong Han3b5410e2008-12-08 09:17:15 +0800228 int flags; /* flags to find out type of domain */
Weidong Han8e6040972008-12-08 15:49:06 +0800229
230 int iommu_coherency;/* indicate coherency of iommu access */
Weidong Hanc7151a82008-12-08 22:51:37 +0800231 int iommu_count; /* reference count of iommu */
232 spinlock_t iommu_lock; /* protect iommu set in domain */
Mark McLoughlin99126f72008-11-20 15:49:47 +0000233};
234
Mark McLoughlina647dac2008-11-20 15:49:48 +0000235/* PCI domain-device relationship */
236struct device_domain_info {
237 struct list_head link; /* link to domain siblings */
238 struct list_head global; /* link to global list */
239 u8 bus; /* PCI bus number */
240 u8 devfn; /* PCI devfn number */
241 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
242 struct dmar_domain *domain; /* pointer to domain */
243};
244
mark gross5e0d2a62008-03-04 15:22:08 -0800245static void flush_unmaps_timeout(unsigned long data);
246
247DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
248
mark gross80b20dd2008-04-18 13:53:58 -0700249#define HIGH_WATER_MARK 250
250struct deferred_flush_tables {
251 int next;
252 struct iova *iova[HIGH_WATER_MARK];
253 struct dmar_domain *domain[HIGH_WATER_MARK];
254};
255
256static struct deferred_flush_tables *deferred_flush;
257
mark gross5e0d2a62008-03-04 15:22:08 -0800258/* number of iommus; bounds the per-domain iommu bitmaps and g_iommus[] */
mark gross5e0d2a62008-03-04 15:22:08 -0800259static int g_num_of_iommus;
260
261static DEFINE_SPINLOCK(async_umap_flush_lock);
262static LIST_HEAD(unmaps_to_do);
263
264static int timer_on;
265static long list_size;
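/*
 * Rough sketch of how these fit together (flush_unmaps itself lives
 * further down in this file): unmapped IOVAs are queued in
 * deferred_flush[] and invalidated in batches, either when unmap_timer
 * fires or when HIGH_WATER_MARK entries pile up; intel_iommu=strict
 * (see below) disables the batching and flushes synchronously.
 */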
mark gross5e0d2a62008-03-04 15:22:08 -0800266
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700267static void domain_remove_dev_info(struct dmar_domain *domain);
268
Suresh Siddha2ae21012008-07-10 11:16:43 -0700269int dmar_disabled;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700270static int __initdata dmar_map_gfx = 1;
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700271static int dmar_forcedac;
mark gross5e0d2a62008-03-04 15:22:08 -0800272static int intel_iommu_strict;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700273
274#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
275static DEFINE_SPINLOCK(device_domain_lock);
276static LIST_HEAD(device_domain_list);
277
278static int __init intel_iommu_setup(char *str)
279{
280 if (!str)
281 return -EINVAL;
282 while (*str) {
283 if (!strncmp(str, "off", 3)) {
284 dmar_disabled = 1;
285 printk(KERN_INFO"Intel-IOMMU: disabled\n");
286 } else if (!strncmp(str, "igfx_off", 8)) {
287 dmar_map_gfx = 0;
288 printk(KERN_INFO
289 "Intel-IOMMU: disable GFX device mapping\n");
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700290 } else if (!strncmp(str, "forcedac", 8)) {
mark gross5e0d2a62008-03-04 15:22:08 -0800291 printk(KERN_INFO
Keshavamurthy, Anil S7d3b03c2007-10-21 16:41:53 -0700292 "Intel-IOMMU: Forcing DAC for PCI devices\n");
293 dmar_forcedac = 1;
mark gross5e0d2a62008-03-04 15:22:08 -0800294 } else if (!strncmp(str, "strict", 6)) {
295 printk(KERN_INFO
296 "Intel-IOMMU: disable batched IOTLB flush\n");
297 intel_iommu_strict = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700298 }
299
300 str += strcspn(str, ",");
301 while (*str == ',')
302 str++;
303 }
304 return 0;
305}
306__setup("intel_iommu=", intel_iommu_setup);
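/*
 * Example kernel command lines accepted by the parser above (options
 * can be combined with commas):
 *   intel_iommu=off
 *   intel_iommu=igfx_off,forcedac,strict
 */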
307
308static struct kmem_cache *iommu_domain_cache;
309static struct kmem_cache *iommu_devinfo_cache;
310static struct kmem_cache *iommu_iova_cache;
311
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700312static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
313{
314 unsigned int flags;
315 void *vaddr;
316
317 /* trying to avoid low memory issues */
318 flags = current->flags & PF_MEMALLOC;
319 current->flags |= PF_MEMALLOC;
320 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
321 current->flags &= (~PF_MEMALLOC | flags);
322 return vaddr;
323}
324
325
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700326static inline void *alloc_pgtable_page(void)
327{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700328 unsigned int flags;
329 void *vaddr;
330
331 /* trying to avoid low memory issues */
332 flags = current->flags & PF_MEMALLOC;
333 current->flags |= PF_MEMALLOC;
334 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
335 current->flags &= (~PF_MEMALLOC | flags);
336 return vaddr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700337}
338
339static inline void free_pgtable_page(void *vaddr)
340{
341 free_page((unsigned long)vaddr);
342}
343
344static inline void *alloc_domain_mem(void)
345{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700346 return iommu_kmem_cache_alloc(iommu_domain_cache);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700347}
348
Kay, Allen M38717942008-09-09 18:37:29 +0300349static void free_domain_mem(void *vaddr)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700350{
351 kmem_cache_free(iommu_domain_cache, vaddr);
352}
353
354static inline void * alloc_devinfo_mem(void)
355{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700356 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700357}
358
359static inline void free_devinfo_mem(void *vaddr)
360{
361 kmem_cache_free(iommu_devinfo_cache, vaddr);
362}
363
364struct iova *alloc_iova_mem(void)
365{
Keshavamurthy, Anil Seb3fa7c2007-10-21 16:41:52 -0700366 return iommu_kmem_cache_alloc(iommu_iova_cache);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700367}
368
369void free_iova_mem(struct iova *iova)
370{
371 kmem_cache_free(iommu_iova_cache, iova);
372}
373
Weidong Han1b573682008-12-08 15:34:06 +0800374
375static inline int width_to_agaw(int width);
376
377/* Calculate the agaw for each iommu.
378 * "SAGAW" may differ across iommus: start from the default agaw and
379 * fall back to a smaller supported agaw for iommus that lack the default.
380 */
381int iommu_calculate_agaw(struct intel_iommu *iommu)
382{
383 unsigned long sagaw;
384 int agaw = -1;
385
386 sagaw = cap_sagaw(iommu->cap);
387 for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
388 agaw >= 0; agaw--) {
389 if (test_bit(agaw, &sagaw))
390 break;
391 }
392
393 return agaw;
394}
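/*
 * Worked example: DEFAULT_DOMAIN_ADDRESS_WIDTH is 48, so the search
 * starts at width_to_agaw(48) = (48 - 30) / 9 = 2 (a 4-level table).
 * If bit 2 of SAGAW is clear it falls back to agaw 1 (39-bit, 3 levels)
 * or agaw 0 (30-bit, 2 levels); -1 means nothing suitable is supported.
 */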
395
Weidong Han8c11e792008-12-08 15:29:22 +0800396/* in native case, each domain is related to only one iommu */
397static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
398{
399 int iommu_id;
400
Weidong Han1ce28fe2008-12-08 16:35:39 +0800401 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
402
Weidong Han8c11e792008-12-08 15:29:22 +0800403 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
404 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
405 return NULL;
406
407 return g_iommus[iommu_id];
408}
409
Weidong Han8e6040972008-12-08 15:49:06 +0800410/* "Coherency" capability may be different across iommus */
411static void domain_update_iommu_coherency(struct dmar_domain *domain)
412{
413 int i;
414
415 domain->iommu_coherency = 1;
416
417 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
418 for (; i < g_num_of_iommus; ) {
419 if (!ecap_coherent(g_iommus[i]->ecap)) {
420 domain->iommu_coherency = 0;
421 break;
422 }
423 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
424 }
425}
426
Weidong Hanc7151a82008-12-08 22:51:37 +0800427static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
428{
429 struct dmar_drhd_unit *drhd = NULL;
430 int i;
431
432 for_each_drhd_unit(drhd) {
433 if (drhd->ignored)
434 continue;
435
436 for (i = 0; i < drhd->devices_cnt; i++)
437 if (drhd->devices[i]->bus->number == bus &&
438 drhd->devices[i]->devfn == devfn)
439 return drhd->iommu;
440
441 if (drhd->include_all)
442 return drhd->iommu;
443 }
444
445 return NULL;
446}
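/*
 * In other words: a (bus, devfn) pair is matched against the device
 * scope of each DRHD unit, and an INCLUDE_ALL unit acts as the
 * catch-all for devices not explicitly listed anywhere.
 */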
447
Weidong Han5331fe62008-12-08 23:00:00 +0800448static void domain_flush_cache(struct dmar_domain *domain,
449 void *addr, int size)
450{
451 if (!domain->iommu_coherency)
452 clflush_cache_range(addr, size);
453}
454
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700455/* Gets context entry for a given bus and devfn */
456static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
457 u8 bus, u8 devfn)
458{
459 struct root_entry *root;
460 struct context_entry *context;
461 unsigned long phy_addr;
462 unsigned long flags;
463
464 spin_lock_irqsave(&iommu->lock, flags);
465 root = &iommu->root_entry[bus];
466 context = get_context_addr_from_root(root);
467 if (!context) {
468 context = (struct context_entry *)alloc_pgtable_page();
469 if (!context) {
470 spin_unlock_irqrestore(&iommu->lock, flags);
471 return NULL;
472 }
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700473 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700474 phy_addr = virt_to_phys((void *)context);
475 set_root_value(root, phy_addr);
476 set_root_present(root);
477 __iommu_flush_cache(iommu, root, sizeof(*root));
478 }
479 spin_unlock_irqrestore(&iommu->lock, flags);
480 return &context[devfn];
481}
482
483static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
484{
485 struct root_entry *root;
486 struct context_entry *context;
487 int ret;
488 unsigned long flags;
489
490 spin_lock_irqsave(&iommu->lock, flags);
491 root = &iommu->root_entry[bus];
492 context = get_context_addr_from_root(root);
493 if (!context) {
494 ret = 0;
495 goto out;
496 }
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000497 ret = context_present(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700498out:
499 spin_unlock_irqrestore(&iommu->lock, flags);
500 return ret;
501}
502
503static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
504{
505 struct root_entry *root;
506 struct context_entry *context;
507 unsigned long flags;
508
509 spin_lock_irqsave(&iommu->lock, flags);
510 root = &iommu->root_entry[bus];
511 context = get_context_addr_from_root(root);
512 if (context) {
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000513 context_clear_entry(&context[devfn]);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700514 __iommu_flush_cache(iommu, &context[devfn], \
515 sizeof(*context));
516 }
517 spin_unlock_irqrestore(&iommu->lock, flags);
518}
519
520static void free_context_table(struct intel_iommu *iommu)
521{
522 struct root_entry *root;
523 int i;
524 unsigned long flags;
525 struct context_entry *context;
526
527 spin_lock_irqsave(&iommu->lock, flags);
528 if (!iommu->root_entry) {
529 goto out;
530 }
531 for (i = 0; i < ROOT_ENTRY_NR; i++) {
532 root = &iommu->root_entry[i];
533 context = get_context_addr_from_root(root);
534 if (context)
535 free_pgtable_page(context);
536 }
537 free_pgtable_page(iommu->root_entry);
538 iommu->root_entry = NULL;
539out:
540 spin_unlock_irqrestore(&iommu->lock, flags);
541}
542
543/* page table handling */
544#define LEVEL_STRIDE (9)
545#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
546
547static inline int agaw_to_level(int agaw)
548{
549 return agaw + 2;
550}
551
552static inline int agaw_to_width(int agaw)
553{
554 return 30 + agaw * LEVEL_STRIDE;
556}
557
558static inline int width_to_agaw(int width)
559{
560 return (width - 30) / LEVEL_STRIDE;
561}
562
563static inline unsigned int level_to_offset_bits(int level)
564{
565 return (12 + (level - 1) * LEVEL_STRIDE);
566}
567
568static inline int address_level_offset(u64 addr, int level)
569{
570 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
571}
572
573static inline u64 level_mask(int level)
574{
575 return ((u64)-1 << level_to_offset_bits(level));
576}
577
578static inline u64 level_size(int level)
579{
580 return ((u64)1 << level_to_offset_bits(level));
581}
582
583static inline u64 align_to_level(u64 addr, int level)
584{
585 return ((addr + level_size(level) - 1) & level_mask(level));
586}
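/*
 * Putting the helpers above together for a 48-bit domain (agaw 2,
 * agaw_to_level() = 4): each level consumes LEVEL_STRIDE = 9 index
 * bits, so level 1 maps 4KiB pages (address bits 12-20), level 2
 * covers 2MiB (bits 21-29), level 3 covers 1GiB (bits 30-38) and
 * level 4 covers 512GiB (bits 39-47).
 */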
587
588static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
589{
590 int addr_width = agaw_to_width(domain->agaw);
591 struct dma_pte *parent, *pte = NULL;
592 int level = agaw_to_level(domain->agaw);
593 int offset;
594 unsigned long flags;
595
596 BUG_ON(!domain->pgd);
597
598 addr &= (((u64)1) << addr_width) - 1;
599 parent = domain->pgd;
600
601 spin_lock_irqsave(&domain->mapping_lock, flags);
602 while (level > 0) {
603 void *tmp_page;
604
605 offset = address_level_offset(addr, level);
606 pte = &parent[offset];
607 if (level == 1)
608 break;
609
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000610 if (!dma_pte_present(pte)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700611 tmp_page = alloc_pgtable_page();
612
613 if (!tmp_page) {
614 spin_unlock_irqrestore(&domain->mapping_lock,
615 flags);
616 return NULL;
617 }
Weidong Han5331fe62008-12-08 23:00:00 +0800618 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000619 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700620 /*
621 * higher level tables always set r/w; the last level page
622 * table controls read/write
623 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000624 dma_set_pte_readable(pte);
625 dma_set_pte_writable(pte);
Weidong Han5331fe62008-12-08 23:00:00 +0800626 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700627 }
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000628 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700629 level--;
630 }
631
632 spin_unlock_irqrestore(&domain->mapping_lock, flags);
633 return pte;
634}
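/*
 * Summary: the walk starts at domain->pgd and descends to level 1,
 * allocating any missing intermediate tables on the way; intermediate
 * entries are always marked read/write and only the returned level-1
 * PTE carries the final permissions (set by domain_page_mapping()).
 */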
635
636/* return address's pte at specific level */
637static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
638 int level)
639{
640 struct dma_pte *parent, *pte = NULL;
641 int total = agaw_to_level(domain->agaw);
642 int offset;
643
644 parent = domain->pgd;
645 while (level <= total) {
646 offset = address_level_offset(addr, total);
647 pte = &parent[offset];
648 if (level == total)
649 return pte;
650
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000651 if (!dma_pte_present(pte))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700652 break;
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000653 parent = phys_to_virt(dma_pte_addr(pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700654 total--;
655 }
656 return NULL;
657}
658
659/* clear one page's page table */
660static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
661{
662 struct dma_pte *pte = NULL;
663
664 /* get last level pte */
665 pte = dma_addr_level_pte(domain, addr, 1);
666
667 if (pte) {
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000668 dma_clear_pte(pte);
Weidong Han5331fe62008-12-08 23:00:00 +0800669 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700670 }
671}
672
673/* clear last level pte, a tlb flush should be followed */
674static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
675{
676 int addr_width = agaw_to_width(domain->agaw);
677
678 start &= (((u64)1) << addr_width) - 1;
679 end &= (((u64)1) << addr_width) - 1;
680 /* in case it's partial page */
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700681 start = PAGE_ALIGN(start);
682 end &= PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700683
684 /* we don't need lock here, nobody else touches the iova range */
685 while (start < end) {
686 dma_pte_clear_one(domain, start);
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700687 start += VTD_PAGE_SIZE;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700688 }
689}
690
691/* free page table pages. last level pte should already be cleared */
692static void dma_pte_free_pagetable(struct dmar_domain *domain,
693 u64 start, u64 end)
694{
695 int addr_width = agaw_to_width(domain->agaw);
696 struct dma_pte *pte;
697 int total = agaw_to_level(domain->agaw);
698 int level;
699 u64 tmp;
700
701 start &= (((u64)1) << addr_width) - 1;
702 end &= (((u64)1) << addr_width) - 1;
703
704 /* we don't need lock here, nobody else touches the iova range */
705 level = 2;
706 while (level <= total) {
707 tmp = align_to_level(start, level);
708 if (tmp >= end || (tmp + level_size(level) > end))
709 return;
710
711 while (tmp < end) {
712 pte = dma_addr_level_pte(domain, tmp, level);
713 if (pte) {
714 free_pgtable_page(
Mark McLoughlin19c239c2008-11-21 16:56:53 +0000715 phys_to_virt(dma_pte_addr(pte)));
716 dma_clear_pte(pte);
Weidong Han5331fe62008-12-08 23:00:00 +0800717 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700718 }
719 tmp += level_size(level);
720 }
721 level++;
722 }
723 /* free pgd */
724 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
725 free_pgtable_page(domain->pgd);
726 domain->pgd = NULL;
727 }
728}
729
730/* iommu handling */
731static int iommu_alloc_root_entry(struct intel_iommu *iommu)
732{
733 struct root_entry *root;
734 unsigned long flags;
735
736 root = (struct root_entry *)alloc_pgtable_page();
737 if (!root)
738 return -ENOMEM;
739
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700740 __iommu_flush_cache(iommu, root, ROOT_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700741
742 spin_lock_irqsave(&iommu->lock, flags);
743 iommu->root_entry = root;
744 spin_unlock_irqrestore(&iommu->lock, flags);
745
746 return 0;
747}
748
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700749static void iommu_set_root_entry(struct intel_iommu *iommu)
750{
751 void *addr;
752 u32 cmd, sts;
753 unsigned long flag;
754
755 addr = iommu->root_entry;
756
757 spin_lock_irqsave(&iommu->register_lock, flag);
758 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
759
760 cmd = iommu->gcmd | DMA_GCMD_SRTP;
761 writel(cmd, iommu->reg + DMAR_GCMD_REG);
762
763 /* Make sure hardware complete it */
764 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
765 readl, (sts & DMA_GSTS_RTPS), sts);
766
767 spin_unlock_irqrestore(&iommu->register_lock, flag);
768}
769
770static void iommu_flush_write_buffer(struct intel_iommu *iommu)
771{
772 u32 val;
773 unsigned long flag;
774
775 if (!cap_rwbf(iommu->cap))
776 return;
777 val = iommu->gcmd | DMA_GCMD_WBF;
778
779 spin_lock_irqsave(&iommu->register_lock, flag);
780 writel(val, iommu->reg + DMAR_GCMD_REG);
781
782 /* Make sure hardware complete it */
783 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
784 readl, (!(val & DMA_GSTS_WBFS)), val);
785
786 spin_unlock_irqrestore(&iommu->register_lock, flag);
787}
788
789/* return value determine if we need a write buffer flush */
790static int __iommu_flush_context(struct intel_iommu *iommu,
791 u16 did, u16 source_id, u8 function_mask, u64 type,
792 int non_present_entry_flush)
793{
794 u64 val = 0;
795 unsigned long flag;
796
797 /*
798 * In the non-present entry flush case: if the hardware doesn't cache
799 * non-present entries we do nothing; if it does cache them, we flush
800 * the entries of domain 0 (the domain id used to tag any cached
801 * non-present entries)
802 */
803 if (non_present_entry_flush) {
804 if (!cap_caching_mode(iommu->cap))
805 return 1;
806 else
807 did = 0;
808 }
809
810 switch (type) {
811 case DMA_CCMD_GLOBAL_INVL:
812 val = DMA_CCMD_GLOBAL_INVL;
813 break;
814 case DMA_CCMD_DOMAIN_INVL:
815 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
816 break;
817 case DMA_CCMD_DEVICE_INVL:
818 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
819 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
820 break;
821 default:
822 BUG();
823 }
824 val |= DMA_CCMD_ICC;
825
826 spin_lock_irqsave(&iommu->register_lock, flag);
827 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
828
829 /* Make sure hardware complete it */
830 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
831 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
832
833 spin_unlock_irqrestore(&iommu->register_lock, flag);
834
Ameya Palande4d235ba2008-10-18 20:27:30 -0700835 /* flush context entry will implicitly flush write buffer */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700836 return 0;
837}
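/*
 * Callers choose the granularity: DMA_CCMD_GLOBAL_INVL drops every
 * cached context entry, DMA_CCMD_DOMAIN_INVL only those tagged with
 * 'did', and DMA_CCMD_DEVICE_INVL narrows further by source_id and
 * function_mask.
 */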
838
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700839/* return value determine if we need a write buffer flush */
840static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
841 u64 addr, unsigned int size_order, u64 type,
842 int non_present_entry_flush)
843{
844 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
845 u64 val = 0, val_iva = 0;
846 unsigned long flag;
847
848 /*
849 * In the non-present entry flush case: if the hardware doesn't cache
850 * non-present entries we do nothing; if it does cache them, we flush
851 * the entries of domain 0 (the domain id used to tag any cached
852 * non-present entries)
853 */
854 if (non_present_entry_flush) {
855 if (!cap_caching_mode(iommu->cap))
856 return 1;
857 else
858 did = 0;
859 }
860
861 switch (type) {
862 case DMA_TLB_GLOBAL_FLUSH:
863 /* global flush doesn't need set IVA_REG */
864 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
865 break;
866 case DMA_TLB_DSI_FLUSH:
867 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
868 break;
869 case DMA_TLB_PSI_FLUSH:
870 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
871 /* Note: always flush non-leaf currently */
872 val_iva = size_order | addr;
873 break;
874 default:
875 BUG();
876 }
877 /* Note: set drain read/write */
878#if 0
879 /*
880 * This is probably to be super secure.. Looks like we can
881 * ignore it without any impact.
882 */
883 if (cap_read_drain(iommu->cap))
884 val |= DMA_TLB_READ_DRAIN;
885#endif
886 if (cap_write_drain(iommu->cap))
887 val |= DMA_TLB_WRITE_DRAIN;
888
889 spin_lock_irqsave(&iommu->register_lock, flag);
890 /* Note: Only uses first TLB reg currently */
891 if (val_iva)
892 dmar_writeq(iommu->reg + tlb_offset, val_iva);
893 dmar_writeq(iommu->reg + tlb_offset + 8, val);
894
895 /* Make sure hardware complete it */
896 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
897 dmar_readq, (!(val & DMA_TLB_IVT)), val);
898
899 spin_unlock_irqrestore(&iommu->register_lock, flag);
900
901 /* check IOTLB invalidation granularity */
902 if (DMA_TLB_IAIG(val) == 0)
903 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
904 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
905 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700906 (unsigned long long)DMA_TLB_IIRG(type),
907 (unsigned long long)DMA_TLB_IAIG(val));
Ameya Palande4d235ba2008-10-18 20:27:30 -0700908 /* flush iotlb entry will implicitly flush write buffer */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700909 return 0;
910}
911
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700912static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
913 u64 addr, unsigned int pages, int non_present_entry_flush)
914{
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -0700915 unsigned int mask;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700916
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700917 BUG_ON(addr & (~VTD_PAGE_MASK));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700918 BUG_ON(pages == 0);
919
920 /* Fallback to domain selective flush if no PSI support */
921 if (!cap_pgsel_inv(iommu->cap))
Youquan Songa77b67d2008-10-16 16:31:56 -0700922 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
923 DMA_TLB_DSI_FLUSH,
924 non_present_entry_flush);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700925
926 /*
927 * PSI requires page size to be 2 ^ x, and the base address is naturally
928 * aligned to the size
929 */
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -0700930 mask = ilog2(__roundup_pow_of_two(pages));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700931 /* Fallback to domain selective flush if size is too big */
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -0700932 if (mask > cap_max_amask_val(iommu->cap))
Youquan Songa77b67d2008-10-16 16:31:56 -0700933 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
934 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700935
Youquan Songa77b67d2008-10-16 16:31:56 -0700936 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
937 DMA_TLB_PSI_FLUSH,
938 non_present_entry_flush);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700939}
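/*
 * Example: a request for 3 pages gives mask = ilog2(roundup_pow_of_two(3))
 * = 2, i.e. a page-selective invalidation of 2^2 = 4 pages starting at
 * addr (which the caller keeps suitably aligned, per the comment above);
 * anything wider than cap_max_amask_val() falls back to a domain-selective
 * flush.
 */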
940
mark grossf8bab732008-02-08 04:18:38 -0800941static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
942{
943 u32 pmen;
944 unsigned long flags;
945
946 spin_lock_irqsave(&iommu->register_lock, flags);
947 pmen = readl(iommu->reg + DMAR_PMEN_REG);
948 pmen &= ~DMA_PMEN_EPM;
949 writel(pmen, iommu->reg + DMAR_PMEN_REG);
950
951 /* wait for the protected region status bit to clear */
952 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
953 readl, !(pmen & DMA_PMEN_PRS), pmen);
954
955 spin_unlock_irqrestore(&iommu->register_lock, flags);
956}
957
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700958static int iommu_enable_translation(struct intel_iommu *iommu)
959{
960 u32 sts;
961 unsigned long flags;
962
963 spin_lock_irqsave(&iommu->register_lock, flags);
964 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
965
966 /* Make sure hardware complete it */
967 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
968 readl, (sts & DMA_GSTS_TES), sts);
969
970 iommu->gcmd |= DMA_GCMD_TE;
971 spin_unlock_irqrestore(&iommu->register_lock, flags);
972 return 0;
973}
974
975static int iommu_disable_translation(struct intel_iommu *iommu)
976{
977 u32 sts;
978 unsigned long flag;
979
980 spin_lock_irqsave(&iommu->register_lock, flag);
981 iommu->gcmd &= ~DMA_GCMD_TE;
982 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
983
984 /* Make sure hardware complete it */
985 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
986 readl, (!(sts & DMA_GSTS_TES)), sts);
987
988 spin_unlock_irqrestore(&iommu->register_lock, flag);
989 return 0;
990}
991
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -0700992/* iommu interrupt handling. Most of it is MSI-like. */
993
mark grossd94afc62008-02-08 04:18:39 -0800994static const char *fault_reason_strings[] =
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -0700995{
996 "Software",
997 "Present bit in root entry is clear",
998 "Present bit in context entry is clear",
999 "Invalid context entry",
1000 "Access beyond MGAW",
1001 "PTE Write access is not set",
1002 "PTE Read access is not set",
1003 "Next page table ptr is invalid",
1004 "Root table address invalid",
1005 "Context table ptr is invalid",
1006 "non-zero reserved fields in RTP",
1007 "non-zero reserved fields in CTP",
1008 "non-zero reserved fields in PTE",
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001009};
mark grossf8bab732008-02-08 04:18:38 -08001010#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001011
mark grossd94afc62008-02-08 04:18:39 -08001012const char *dmar_get_fault_reason(u8 fault_reason)
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001013{
mark grossd94afc62008-02-08 04:18:39 -08001014 if (fault_reason > MAX_FAULT_REASON_IDX)
1015 return "Unknown";
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001016 else
1017 return fault_reason_strings[fault_reason];
1018}
1019
1020void dmar_msi_unmask(unsigned int irq)
1021{
1022 struct intel_iommu *iommu = get_irq_data(irq);
1023 unsigned long flag;
1024
1025 /* unmask it */
1026 spin_lock_irqsave(&iommu->register_lock, flag);
1027 writel(0, iommu->reg + DMAR_FECTL_REG);
1028 /* Read a reg to force flush the post write */
1029 readl(iommu->reg + DMAR_FECTL_REG);
1030 spin_unlock_irqrestore(&iommu->register_lock, flag);
1031}
1032
1033void dmar_msi_mask(unsigned int irq)
1034{
1035 unsigned long flag;
1036 struct intel_iommu *iommu = get_irq_data(irq);
1037
1038 /* mask it */
1039 spin_lock_irqsave(&iommu->register_lock, flag);
1040 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1041 /* Read a reg to force flush the post write */
1042 readl(iommu->reg + DMAR_FECTL_REG);
1043 spin_unlock_irqrestore(&iommu->register_lock, flag);
1044}
1045
1046void dmar_msi_write(int irq, struct msi_msg *msg)
1047{
1048 struct intel_iommu *iommu = get_irq_data(irq);
1049 unsigned long flag;
1050
1051 spin_lock_irqsave(&iommu->register_lock, flag);
1052 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1053 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1054 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1055 spin_unlock_irqrestore(&iommu->register_lock, flag);
1056}
1057
1058void dmar_msi_read(int irq, struct msi_msg *msg)
1059{
1060 struct intel_iommu *iommu = get_irq_data(irq);
1061 unsigned long flag;
1062
1063 spin_lock_irqsave(&iommu->register_lock, flag);
1064 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1065 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1066 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1067 spin_unlock_irqrestore(&iommu->register_lock, flag);
1068}
1069
1070static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001071 u8 fault_reason, u16 source_id, unsigned long long addr)
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001072{
mark grossd94afc62008-02-08 04:18:39 -08001073 const char *reason;
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07001074
1075 reason = dmar_get_fault_reason(fault_reason);
1076
1077 printk(KERN_ERR
1078 "DMAR:[%s] Request device [%02x:%02x.%d] "
1079 "fault addr %llx \n"
1080 "DMAR:[fault reason %02d] %s\n",
1081 (type ? "DMA Read" : "DMA Write"),
1082 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1083 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1084 return 0;
1085}
1086
1087#define PRIMARY_FAULT_REG_LEN (16)
1088static irqreturn_t iommu_page_fault(int irq, void *dev_id)
1089{
1090 struct intel_iommu *iommu = dev_id;
1091 int reg, fault_index;
1092 u32 fault_status;
1093 unsigned long flag;
1094
1095 spin_lock_irqsave(&iommu->register_lock, flag);
1096 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1097
1098 /* TBD: ignore advanced fault log currently */
1099 if (!(fault_status & DMA_FSTS_PPF))
1100 goto clear_overflow;
1101
1102 fault_index = dma_fsts_fault_record_index(fault_status);
1103 reg = cap_fault_reg_offset(iommu->cap);
1104 while (1) {
1105 u8 fault_reason;
1106 u16 source_id;
1107 u64 guest_addr;
1108 int type;
1109 u32 data;
1110
1111 /* highest 32 bits */
1112 data = readl(iommu->reg + reg +
1113 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1114 if (!(data & DMA_FRCD_F))
1115 break;
1116
1117 fault_reason = dma_frcd_fault_reason(data);
1118 type = dma_frcd_type(data);
1119
1120 data = readl(iommu->reg + reg +
1121 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1122 source_id = dma_frcd_source_id(data);
1123
1124 guest_addr = dmar_readq(iommu->reg + reg +
1125 fault_index * PRIMARY_FAULT_REG_LEN);
1126 guest_addr = dma_frcd_page_addr(guest_addr);
1127 /* clear the fault */
1128 writel(DMA_FRCD_F, iommu->reg + reg +
1129 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1130
1131 spin_unlock_irqrestore(&iommu->register_lock, flag);
1132
1133 iommu_page_fault_do_one(iommu, type, fault_reason,
1134 source_id, guest_addr);
1135
1136 fault_index++;
1137 if (fault_index >= cap_num_fault_regs(iommu->cap))
1138 fault_index = 0;
1139 spin_lock_irqsave(&iommu->register_lock, flag);
1140 }
1141clear_overflow:
1142 /* clear primary fault overflow */
1143 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1144 if (fault_status & DMA_FSTS_PFO)
1145 writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
1146
1147 spin_unlock_irqrestore(&iommu->register_lock, flag);
1148 return IRQ_HANDLED;
1149}
1150
1151int dmar_set_interrupt(struct intel_iommu *iommu)
1152{
1153 int irq, ret;
1154
1155 irq = create_irq();
1156 if (!irq) {
1157 printk(KERN_ERR "IOMMU: no free vectors\n");
1158 return -EINVAL;
1159 }
1160
1161 set_irq_data(irq, iommu);
1162 iommu->irq = irq;
1163
1164 ret = arch_setup_dmar_msi(irq);
1165 if (ret) {
1166 set_irq_data(irq, NULL);
1167 iommu->irq = 0;
1168 destroy_irq(irq);
1169 return 0;
1170 }
1171
1172 /* Force fault register is cleared */
1173 iommu_page_fault(irq, iommu);
1174
1175 ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
1176 if (ret)
1177 printk(KERN_ERR "IOMMU: can't request irq\n");
1178 return ret;
1179}
1180
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001181static int iommu_init_domains(struct intel_iommu *iommu)
1182{
1183 unsigned long ndomains;
1184 unsigned long nlongs;
1185
1186 ndomains = cap_ndoms(iommu->cap);
1187 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1188 nlongs = BITS_TO_LONGS(ndomains);
1189
1190 /* TBD: there might be 64K domains,
1191 * consider other allocation for future chip
1192 */
1193 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1194 if (!iommu->domain_ids) {
1195 printk(KERN_ERR "Allocating domain id array failed\n");
1196 return -ENOMEM;
1197 }
1198 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1199 GFP_KERNEL);
1200 if (!iommu->domains) {
1201 printk(KERN_ERR "Allocating domain array failed\n");
1202 kfree(iommu->domain_ids);
1203 return -ENOMEM;
1204 }
1205
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001206 spin_lock_init(&iommu->lock);
1207
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001208 /*
1209 * if Caching mode is set, then invalid translations are tagged
1210 * with domainid 0. Hence we need to pre-allocate it.
1211 */
1212 if (cap_caching_mode(iommu->cap))
1213 set_bit(0, iommu->domain_ids);
1214 return 0;
1215}
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001216
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001217
1218static void domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001219
1220void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001221{
1222 struct dmar_domain *domain;
1223 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001224 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001225
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001226 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1227 for (; i < cap_ndoms(iommu->cap); ) {
1228 domain = iommu->domains[i];
1229 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001230
1231 spin_lock_irqsave(&domain->iommu_lock, flags);
1232 if (--domain->iommu_count == 0)
1233 domain_exit(domain);
1234 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1235
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001236 i = find_next_bit(iommu->domain_ids,
1237 cap_ndoms(iommu->cap), i+1);
1238 }
1239
1240 if (iommu->gcmd & DMA_GCMD_TE)
1241 iommu_disable_translation(iommu);
1242
1243 if (iommu->irq) {
1244 set_irq_data(iommu->irq, NULL);
1245 /* This will mask the irq */
1246 free_irq(iommu->irq, iommu);
1247 destroy_irq(iommu->irq);
1248 }
1249
1250 kfree(iommu->domains);
1251 kfree(iommu->domain_ids);
1252
Weidong Hand9630fe2008-12-08 11:06:32 +08001253 g_iommus[iommu->seq_id] = NULL;
1254
1255 /* if all iommus are freed, free g_iommus */
1256 for (i = 0; i < g_num_of_iommus; i++) {
1257 if (g_iommus[i])
1258 break;
1259 }
1260
1261 if (i == g_num_of_iommus)
1262 kfree(g_iommus);
1263
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001264 /* free context mapping */
1265 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001266}
1267
1268static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
1269{
1270 unsigned long num;
1271 unsigned long ndomains;
1272 struct dmar_domain *domain;
1273 unsigned long flags;
1274
1275 domain = alloc_domain_mem();
1276 if (!domain)
1277 return NULL;
1278
1279 ndomains = cap_ndoms(iommu->cap);
1280
1281 spin_lock_irqsave(&iommu->lock, flags);
1282 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1283 if (num >= ndomains) {
1284 spin_unlock_irqrestore(&iommu->lock, flags);
1285 free_domain_mem(domain);
1286 printk(KERN_ERR "IOMMU: no free domain ids\n");
1287 return NULL;
1288 }
1289
1290 set_bit(num, iommu->domain_ids);
1291 domain->id = num;
Weidong Han8c11e792008-12-08 15:29:22 +08001292 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1293 set_bit(iommu->seq_id, &domain->iommu_bmp);
Weidong Hand71a2f32008-12-07 21:13:41 +08001294 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001295 iommu->domains[num] = domain;
1296 spin_unlock_irqrestore(&iommu->lock, flags);
1297
1298 return domain;
1299}
1300
1301static void iommu_free_domain(struct dmar_domain *domain)
1302{
1303 unsigned long flags;
Weidong Han8c11e792008-12-08 15:29:22 +08001304 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001305
Weidong Han8c11e792008-12-08 15:29:22 +08001306 iommu = domain_get_iommu(domain);
1307
1308 spin_lock_irqsave(&iommu->lock, flags);
1309 clear_bit(domain->id, iommu->domain_ids);
1310 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001311}
1312
1313static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001314static struct lock_class_key reserved_alloc_key;
1315static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001316
1317static void dmar_init_reserved_ranges(void)
1318{
1319 struct pci_dev *pdev = NULL;
1320 struct iova *iova;
1321 int i;
1322 u64 addr, size;
1323
David Millerf6611972008-02-06 01:36:23 -08001324 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001325
Mark Gross8a443df2008-03-04 14:59:31 -08001326 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1327 &reserved_alloc_key);
1328 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1329 &reserved_rbtree_key);
1330
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001331 /* IOAPIC ranges shouldn't be accessed by DMA */
1332 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1333 IOVA_PFN(IOAPIC_RANGE_END));
1334 if (!iova)
1335 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1336
1337 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1338 for_each_pci_dev(pdev) {
1339 struct resource *r;
1340
1341 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1342 r = &pdev->resource[i];
1343 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1344 continue;
1345 addr = r->start;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001346 addr &= PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001347 size = r->end - addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001348 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001349 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1350 IOVA_PFN(size + addr) - 1);
1351 if (!iova)
1352 printk(KERN_ERR "Reserve iova failed\n");
1353 }
1354 }
1355
1356}
1357
1358static void domain_reserve_special_ranges(struct dmar_domain *domain)
1359{
1360 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1361}
1362
1363static inline int guestwidth_to_adjustwidth(int gaw)
1364{
1365 int agaw;
1366 int r = (gaw - 12) % 9;
1367
1368 if (r == 0)
1369 agaw = gaw;
1370 else
1371 agaw = gaw + 9 - r;
1372 if (agaw > 64)
1373 agaw = 64;
1374 return agaw;
1375}
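/*
 * Worked example: a 36-bit guest width gives r = (36 - 12) % 9 = 6,
 * so it is rounded up to 36 + 9 - 6 = 39 bits, i.e. a whole number of
 * 9-bit page-table levels above the 12-bit page offset; 48 is already
 * aligned and is returned unchanged.
 */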
1376
1377static int domain_init(struct dmar_domain *domain, int guest_width)
1378{
1379 struct intel_iommu *iommu;
1380 int adjust_width, agaw;
1381 unsigned long sagaw;
1382
David Millerf6611972008-02-06 01:36:23 -08001383 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001384 spin_lock_init(&domain->mapping_lock);
Weidong Hanc7151a82008-12-08 22:51:37 +08001385 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001386
1387 domain_reserve_special_ranges(domain);
1388
1389 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001390 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001391 if (guest_width > cap_mgaw(iommu->cap))
1392 guest_width = cap_mgaw(iommu->cap);
1393 domain->gaw = guest_width;
1394 adjust_width = guestwidth_to_adjustwidth(guest_width);
1395 agaw = width_to_agaw(adjust_width);
1396 sagaw = cap_sagaw(iommu->cap);
1397 if (!test_bit(agaw, &sagaw)) {
1398 /* hardware doesn't support it, choose a bigger one */
1399 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1400 agaw = find_next_bit(&sagaw, 5, agaw);
1401 if (agaw >= 5)
1402 return -ENODEV;
1403 }
1404 domain->agaw = agaw;
1405 INIT_LIST_HEAD(&domain->devices);
1406
Weidong Han8e6040972008-12-08 15:49:06 +08001407 if (ecap_coherent(iommu->ecap))
1408 domain->iommu_coherency = 1;
1409 else
1410 domain->iommu_coherency = 0;
1411
Weidong Hanc7151a82008-12-08 22:51:37 +08001412 domain->iommu_count = 1;
1413
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001414 /* always allocate the top pgd */
1415 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1416 if (!domain->pgd)
1417 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001418 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001419 return 0;
1420}
1421
1422static void domain_exit(struct dmar_domain *domain)
1423{
1424 u64 end;
1425
1426 /* Domain 0 is reserved, so don't process it */
1427 if (!domain)
1428 return;
1429
1430 domain_remove_dev_info(domain);
1431 /* destroy iovas */
1432 put_iova_domain(&domain->iovad);
1433 end = DOMAIN_MAX_ADDR(domain->gaw);
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001434 end = end & (~PAGE_MASK);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001435
1436 /* clear ptes */
1437 dma_pte_clear_range(domain, 0, end);
1438
1439 /* free page tables */
1440 dma_pte_free_pagetable(domain, 0, end);
1441
1442 iommu_free_domain(domain);
1443 free_domain_mem(domain);
1444}
1445
1446static int domain_context_mapping_one(struct dmar_domain *domain,
1447 u8 bus, u8 devfn)
1448{
1449 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001450 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001451 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001452
1453 pr_debug("Set context mapping for %02x:%02x.%d\n",
1454 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1455 BUG_ON(!domain->pgd);
Weidong Han5331fe62008-12-08 23:00:00 +08001456
1457 iommu = device_to_iommu(bus, devfn);
1458 if (!iommu)
1459 return -ENODEV;
1460
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461 context = device_to_context_entry(iommu, bus, devfn);
1462 if (!context)
1463 return -ENOMEM;
1464 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001465 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001466 spin_unlock_irqrestore(&iommu->lock, flags);
1467 return 0;
1468 }
1469
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001470 context_set_domain_id(context, domain->id);
1471 context_set_address_width(context, domain->agaw);
1472 context_set_address_root(context, virt_to_phys(domain->pgd));
1473 context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
1474 context_set_fault_enable(context);
1475 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001476 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001477
1478 /* it's a non-present to present mapping */
Youquan Songa77b67d2008-10-16 16:31:56 -07001479 if (iommu->flush.flush_context(iommu, domain->id,
1480 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1481 DMA_CCMD_DEVICE_INVL, 1))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001482 iommu_flush_write_buffer(iommu);
1483 else
Youquan Songa77b67d2008-10-16 16:31:56 -07001484 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1485
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001486 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001487
1488 spin_lock_irqsave(&domain->iommu_lock, flags);
1489 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1490 domain->iommu_count++;
1491 domain_update_iommu_coherency(domain);
1492 }
1493 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001494 return 0;
1495}
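/*
 * The tail of the function handles the non-present-to-present
 * transition: if flush_context() reports that no context-cache flush
 * was necessary (hardware without caching mode), only the write buffer
 * is flushed; otherwise a domain-selective IOTLB flush follows.
 */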
1496
1497static int
1498domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1499{
1500 int ret;
1501 struct pci_dev *tmp, *parent;
1502
1503 ret = domain_context_mapping_one(domain, pdev->bus->number,
1504 pdev->devfn);
1505 if (ret)
1506 return ret;
1507
1508 /* dependent device mapping */
1509 tmp = pci_find_upstream_pcie_bridge(pdev);
1510 if (!tmp)
1511 return 0;
1512 /* Secondary interface's bus number and devfn 0 */
1513 parent = pdev->bus->self;
1514 while (parent != tmp) {
1515 ret = domain_context_mapping_one(domain, parent->bus->number,
1516 parent->devfn);
1517 if (ret)
1518 return ret;
1519 parent = parent->bus->self;
1520 }
1521 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1522 return domain_context_mapping_one(domain,
1523 tmp->subordinate->number, 0);
1524 else /* this is a legacy PCI bridge */
1525 return domain_context_mapping_one(domain,
1526 tmp->bus->number, tmp->devfn);
1527}
1528
Weidong Han5331fe62008-12-08 23:00:00 +08001529static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001530{
1531 int ret;
1532 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001533 struct intel_iommu *iommu;
1534
1535 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
1536 if (!iommu)
1537 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538
Weidong Han8c11e792008-12-08 15:29:22 +08001539 ret = device_context_mapped(iommu,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001540 pdev->bus->number, pdev->devfn);
1541 if (!ret)
1542 return ret;
1543 /* dependent device mapping */
1544 tmp = pci_find_upstream_pcie_bridge(pdev);
1545 if (!tmp)
1546 return ret;
1547 /* Secondary interface's bus number and devfn 0 */
1548 parent = pdev->bus->self;
1549 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001550 ret = device_context_mapped(iommu, parent->bus->number,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001551 parent->devfn);
1552 if (!ret)
1553 return ret;
1554 parent = parent->bus->self;
1555 }
1556 if (tmp->is_pcie)
Weidong Han8c11e792008-12-08 15:29:22 +08001557 return device_context_mapped(iommu,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 tmp->subordinate->number, 0);
1559 else
Weidong Han8c11e792008-12-08 15:29:22 +08001560 return device_context_mapped(iommu,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001561 tmp->bus->number, tmp->devfn);
1562}
1563
1564static int
1565domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1566 u64 hpa, size_t size, int prot)
1567{
1568 u64 start_pfn, end_pfn;
1569 struct dma_pte *pte;
1570 int index;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001571 int addr_width = agaw_to_width(domain->agaw);
1572
1573 hpa &= (((u64)1) << addr_width) - 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001574
1575 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1576 return -EINVAL;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001577 iova &= PAGE_MASK;
1578 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1579 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001580 index = 0;
1581 while (start_pfn < end_pfn) {
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001582 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001583 if (!pte)
1584 return -ENOMEM;
1585 /* We don't need lock here, nobody else
1586 * touches the iova range
1587 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001588 BUG_ON(dma_pte_addr(pte));
1589 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1590 dma_set_pte_prot(pte, prot);
Weidong Han5331fe62008-12-08 23:00:00 +08001591 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001592 start_pfn++;
1593 index++;
1594 }
1595 return 0;
1596}
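/*
 * Example: mapping size 0x2000 at hpa 0x10000000 gives start_pfn
 * 0x10000 and end_pfn 0x10002, so two level-1 PTEs are written, one
 * for iova and one for iova + VTD_PAGE_SIZE.
 */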
1597
Weidong Hanc7151a82008-12-08 22:51:37 +08001598static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001599{
Weidong Hanc7151a82008-12-08 22:51:37 +08001600 if (!iommu)
1601 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001602
1603 clear_context_table(iommu, bus, devfn);
1604 iommu->flush.flush_context(iommu, 0, 0, 0,
Youquan Songa77b67d2008-10-16 16:31:56 -07001605 DMA_CCMD_GLOBAL_INVL, 0);
Weidong Han8c11e792008-12-08 15:29:22 +08001606 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Youquan Songa77b67d2008-10-16 16:31:56 -07001607 DMA_TLB_GLOBAL_FLUSH, 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001608}
1609
1610static void domain_remove_dev_info(struct dmar_domain *domain)
1611{
1612 struct device_domain_info *info;
1613 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001614 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001615
1616 spin_lock_irqsave(&device_domain_lock, flags);
1617 while (!list_empty(&domain->devices)) {
1618 info = list_entry(domain->devices.next,
1619 struct device_domain_info, link);
1620 list_del(&info->link);
1621 list_del(&info->global);
1622 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001623 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001624 spin_unlock_irqrestore(&device_domain_lock, flags);
1625
Weidong Hanc7151a82008-12-08 22:51:37 +08001626 iommu = device_to_iommu(info->bus, info->devfn);
1627 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001628 free_devinfo_mem(info);
1629
1630 spin_lock_irqsave(&device_domain_lock, flags);
1631 }
1632 spin_unlock_irqrestore(&device_domain_lock, flags);
1633}
1634
1635/*
1636 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001637 * Note: struct pci_dev->dev.archdata.iommu stores the device's domain info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638 */
Kay, Allen M38717942008-09-09 18:37:29 +03001639static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640find_domain(struct pci_dev *pdev)
1641{
1642 struct device_domain_info *info;
1643
1644 /* No lock here; assumes no domain exits in the normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001645 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001646 if (info)
1647 return info->domain;
1648 return NULL;
1649}
1650
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651/* the returned domain is fully initialized (page table, iova allocator) */
1652static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1653{
1654 struct dmar_domain *domain, *found = NULL;
1655 struct intel_iommu *iommu;
1656 struct dmar_drhd_unit *drhd;
1657 struct device_domain_info *info, *tmp;
1658 struct pci_dev *dev_tmp;
1659 unsigned long flags;
1660 int bus = 0, devfn = 0;
1661
1662 domain = find_domain(pdev);
1663 if (domain)
1664 return domain;
1665
1666 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1667 if (dev_tmp) {
1668 if (dev_tmp->is_pcie) {
1669 bus = dev_tmp->subordinate->number;
1670 devfn = 0;
1671 } else {
1672 bus = dev_tmp->bus->number;
1673 devfn = dev_tmp->devfn;
1674 }
1675 spin_lock_irqsave(&device_domain_lock, flags);
1676 list_for_each_entry(info, &device_domain_list, global) {
1677 if (info->bus == bus && info->devfn == devfn) {
1678 found = info->domain;
1679 break;
1680 }
1681 }
1682 spin_unlock_irqrestore(&device_domain_lock, flags);
1683 /* pcie-pci bridge already has a domain, use it */
1684 if (found) {
1685 domain = found;
1686 goto found_domain;
1687 }
1688 }
1689
1690 /* Allocate new domain for the device */
1691 drhd = dmar_find_matched_drhd_unit(pdev);
1692 if (!drhd) {
1693 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1694 pci_name(pdev));
1695 return NULL;
1696 }
1697 iommu = drhd->iommu;
1698
1699 domain = iommu_alloc_domain(iommu);
1700 if (!domain)
1701 goto error;
1702
1703 if (domain_init(domain, gaw)) {
1704 domain_exit(domain);
1705 goto error;
1706 }
1707
1708 /* register pcie-to-pci device */
1709 if (dev_tmp) {
1710 info = alloc_devinfo_mem();
1711 if (!info) {
1712 domain_exit(domain);
1713 goto error;
1714 }
1715 info->bus = bus;
1716 info->devfn = devfn;
1717 info->dev = NULL;
1718 info->domain = domain;
1719 /* This domain is shared by all devices behind the p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001720 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721
1722 /* pcie-to-pci bridge already has a domain, use it */
1723 found = NULL;
1724 spin_lock_irqsave(&device_domain_lock, flags);
1725 list_for_each_entry(tmp, &device_domain_list, global) {
1726 if (tmp->bus == bus && tmp->devfn == devfn) {
1727 found = tmp->domain;
1728 break;
1729 }
1730 }
1731 if (found) {
1732 free_devinfo_mem(info);
1733 domain_exit(domain);
1734 domain = found;
1735 } else {
1736 list_add(&info->link, &domain->devices);
1737 list_add(&info->global, &device_domain_list);
1738 }
1739 spin_unlock_irqrestore(&device_domain_lock, flags);
1740 }
1741
1742found_domain:
1743 info = alloc_devinfo_mem();
1744 if (!info)
1745 goto error;
1746 info->bus = pdev->bus->number;
1747 info->devfn = pdev->devfn;
1748 info->dev = pdev;
1749 info->domain = domain;
1750 spin_lock_irqsave(&device_domain_lock, flags);
1751 /* somebody else was faster and already attached a domain */
1752 found = find_domain(pdev);
1753 if (found != NULL) {
1754 spin_unlock_irqrestore(&device_domain_lock, flags);
1755 if (found != domain) {
1756 domain_exit(domain);
1757 domain = found;
1758 }
1759 free_devinfo_mem(info);
1760 return domain;
1761 }
1762 list_add(&info->link, &domain->devices);
1763 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001764 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001765 spin_unlock_irqrestore(&device_domain_lock, flags);
1766 return domain;
1767error:
1768 /* recheck here; someone else may have set it up in the meantime */
1769 return find_domain(pdev);
1770}
1771
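/*
 * Identity-map helper used for RMRRs and the workarounds further down.
 * The rough sequence: find/create a domain for the device, reserve the
 * [start, end) range in the domain's iova allocator, clear any stale
 * PTEs, install a 1:1 mapping, then set up the context entry. A sketch
 * of a caller (this is exactly what iommu_prepare_rmrr_dev() does):
 *
 *	ret = iommu_prepare_identity_map(pdev, rmrr->base_address,
 *					 rmrr->end_address + 1);
 */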
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001772static int iommu_prepare_identity_map(struct pci_dev *pdev,
1773 unsigned long long start,
1774 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001775{
1776 struct dmar_domain *domain;
1777 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001778 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001779 int ret;
1780
1781 printk(KERN_INFO
1782 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1783 pci_name(pdev), start, end);
1784 /* page table init */
1785 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1786 if (!domain)
1787 return -ENOMEM;
1788
1789 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001790 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001792 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001793 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1794 IOVA_PFN(base + size) - 1)) {
1795 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1796 ret = -ENOMEM;
1797 goto error;
1798 }
1799
1800 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1801 size, base, pci_name(pdev));
1802 /*
1803 * RMRR range might have overlap with physical memory range,
1804 * clear it first
1805 */
1806 dma_pte_clear_range(domain, base, base + size);
1807
1808 ret = domain_page_mapping(domain, base, base, size,
1809 DMA_PTE_READ|DMA_PTE_WRITE);
1810 if (ret)
1811 goto error;
1812
1813 /* context entry init */
1814 ret = domain_context_mapping(domain, pdev);
1815 if (!ret)
1816 return 0;
1817error:
1818 domain_exit(domain);
1819 return ret;
1820
1821}
1822
1823static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1824 struct pci_dev *pdev)
1825{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001826 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001827 return 0;
1828 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1829 rmrr->end_address + 1);
1830}
1831
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001832#ifdef CONFIG_DMAR_GFX_WA
Yinghai Lud52d53b2008-06-16 20:10:55 -07001833struct iommu_prepare_data {
1834 struct pci_dev *pdev;
1835 int ret;
1836};
1837
1838static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1839 unsigned long end_pfn, void *datax)
1840{
1841 struct iommu_prepare_data *data;
1842
1843 data = (struct iommu_prepare_data *)datax;
1844
1845 data->ret = iommu_prepare_identity_map(data->pdev,
1846 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1847 return data->ret;
1848
1849}
1850
1851static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1852{
1853 int nid;
1854 struct iommu_prepare_data data;
1855
1856 data.pdev = pdev;
1857 data.ret = 0;
1858
1859 for_each_online_node(nid) {
1860 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1861 if (data.ret)
1862 return data.ret;
1863 }
1864 return data.ret;
1865}
1866
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001867static void __init iommu_prepare_gfx_mapping(void)
1868{
1869 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001870 int ret;
1871
1872 for_each_pci_dev(pdev) {
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001873 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001874 !IS_GFX_DEVICE(pdev))
1875 continue;
1876 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1877 pci_name(pdev));
Yinghai Lud52d53b2008-06-16 20:10:55 -07001878 ret = iommu_prepare_with_active_regions(pdev);
1879 if (ret)
1880 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001881 }
1882}
Mark McLoughlin2abd7e12008-11-20 15:49:50 +00001883#else /* !CONFIG_DMAR_GFX_WA */
1884static inline void iommu_prepare_gfx_mapping(void)
1885{
1886 return;
1887}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001888#endif
1889
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001890#ifdef CONFIG_DMAR_FLOPPY_WA
1891static inline void iommu_prepare_isa(void)
1892{
1893 struct pci_dev *pdev;
1894 int ret;
1895
1896 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1897 if (!pdev)
1898 return;
1899
1900 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1901 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1902
1903 if (ret)
1904 printk("IOMMU: Failed to create 0-64M identity map, "
1905 "floppy might not work\n");
1906
1907}
1908#else
1909static inline void iommu_prepare_isa(void)
1910{
1911 return;
1912}
1913#endif /* !CONFIG_DMAR_FLOPPY_WA */
1914
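/*
 * init_dmars() ties the pieces above together at boot. In order: count
 * the DRHD units and allocate g_iommus[] plus the deferred_flush tables,
 * set up per-iommu domain structures and root entries, pick queued vs.
 * register-based invalidation, identity-map the RMRR regions (plus the
 * gfx and ISA/floppy workarounds), then wire up fault reporting,
 * program the root entries, flush the caches and enable translation on
 * every unit.
 */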
Mark McLoughlin519a0542008-11-20 14:21:13 +00001915static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001916{
1917 struct dmar_drhd_unit *drhd;
1918 struct dmar_rmrr_unit *rmrr;
1919 struct pci_dev *pdev;
1920 struct intel_iommu *iommu;
mark gross80b20dd2008-04-18 13:53:58 -07001921 int i, ret, unit = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001922
1923 /*
1924 * for each drhd
1925 * allocate root
1926 * initialize and program root entry to not present
1927 * endfor
1928 */
1929 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08001930 g_num_of_iommus++;
1931 /*
1932 * lock not needed as this is only incremented in the
1933 * single-threaded kernel __init code path; all other
1934 * accesses are read-only
1935 */
1936 }
1937
Weidong Hand9630fe2008-12-08 11:06:32 +08001938 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1939 GFP_KERNEL);
1940 if (!g_iommus) {
1941 printk(KERN_ERR "Allocating global iommu array failed\n");
1942 ret = -ENOMEM;
1943 goto error;
1944 }
1945
mark gross80b20dd2008-04-18 13:53:58 -07001946 deferred_flush = kzalloc(g_num_of_iommus *
1947 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1948 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08001949 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08001950 ret = -ENOMEM;
1951 goto error;
1952 }
1953
mark gross5e0d2a62008-03-04 15:22:08 -08001954 for_each_drhd_unit(drhd) {
1955 if (drhd->ignored)
1956 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07001957
1958 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08001959 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001960
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001961 ret = iommu_init_domains(iommu);
1962 if (ret)
1963 goto error;
1964
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001965 /*
1966 * TBD:
1967 * we could share the same root & context tables
1968 * among all IOMMUs. Need to split it later.
1969 */
1970 ret = iommu_alloc_root_entry(iommu);
1971 if (ret) {
1972 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1973 goto error;
1974 }
1975 }
1976
Youquan Songa77b67d2008-10-16 16:31:56 -07001977 for_each_drhd_unit(drhd) {
1978 if (drhd->ignored)
1979 continue;
1980
1981 iommu = drhd->iommu;
1982 if (dmar_enable_qi(iommu)) {
1983 /*
1984 * Queued Invalidate not enabled, use Register Based
1985 * Invalidate
1986 */
1987 iommu->flush.flush_context = __iommu_flush_context;
1988 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1989 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09001990 "invalidation\n",
1991 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07001992 } else {
1993 iommu->flush.flush_context = qi_flush_context;
1994 iommu->flush.flush_iotlb = qi_flush_iotlb;
1995 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09001996 "invalidation\n",
1997 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07001998 }
1999 }
2000
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002001 /*
2002 * For each rmrr
2003 * for each dev attached to rmrr
2004 * do
2005 * locate drhd for dev, alloc domain for dev
2006 * allocate free domain
2007 * allocate page table entries for rmrr
2008 * if context not allocated for bus
2009 * allocate and init context
2010 * set present in root table for this bus
2011 * init context with domain, translation etc
2012 * endfor
2013 * endfor
2014 */
2015 for_each_rmrr_units(rmrr) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002016 for (i = 0; i < rmrr->devices_cnt; i++) {
2017 pdev = rmrr->devices[i];
2018 /* some BIOSes list non-existent devices in the DMAR table */
2019 if (!pdev)
2020 continue;
2021 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2022 if (ret)
2023 printk(KERN_ERR
2024 "IOMMU: mapping reserved region failed\n");
2025 }
2026 }
2027
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002028 iommu_prepare_gfx_mapping();
2029
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002030 iommu_prepare_isa();
2031
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002032 /*
2033 * for each drhd
2034 * enable fault log
2035 * global invalidate context cache
2036 * global invalidate iotlb
2037 * enable translation
2038 */
2039 for_each_drhd_unit(drhd) {
2040 if (drhd->ignored)
2041 continue;
2042 iommu = drhd->iommu;
2043 sprintf (iommu->name, "dmar%d", unit++);
2044
2045 iommu_flush_write_buffer(iommu);
2046
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002047 ret = dmar_set_interrupt(iommu);
2048 if (ret)
2049 goto error;
2050
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002051 iommu_set_root_entry(iommu);
2052
Youquan Songa77b67d2008-10-16 16:31:56 -07002053 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
2054 0);
2055 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2056 0);
mark grossf8bab732008-02-08 04:18:38 -08002057 iommu_disable_protect_mem_regions(iommu);
2058
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002059 ret = iommu_enable_translation(iommu);
2060 if (ret)
2061 goto error;
2062 }
2063
2064 return 0;
2065error:
2066 for_each_drhd_unit(drhd) {
2067 if (drhd->ignored)
2068 continue;
2069 iommu = drhd->iommu;
2070 free_iommu(iommu);
2071 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002072 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002073 return ret;
2074}
2075
2076static inline u64 aligned_size(u64 host_addr, size_t size)
2077{
2078 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002079 addr = (host_addr & (~PAGE_MASK)) + size;
2080 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002081}
2082
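/*
 * iommu_alloc_iova(): carve 'size' bytes of I/O virtual address space
 * out of the domain, below 'end' (clamped to the domain's maximum
 * address). 'size' is expected to be page-aligned by the caller, e.g.
 * via aligned_size() above.
 */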
2083struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002084iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002085{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002086 struct iova *piova;
2087
2088 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002089 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002090 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002091 return NULL;
2092
2093 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002094 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002095 return piova;
2096}
2097
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002098static struct iova *
2099__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002100 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002101{
2102 struct pci_dev *pdev = to_pci_dev(dev);
2103 struct iova *iova = NULL;
2104
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002105 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
2106 iova = iommu_alloc_iova(domain, size, dma_mask);
2107 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002108 /*
2109 * First try to allocate an I/O virtual address below
2110 * DMA_32BIT_MASK and, if that fails, fall back to allocating
Joe Perches36098012007-12-17 11:40:11 -08002111 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002112 */
2113 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
2114 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002115 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002116 }
2117
2118 if (!iova) {
2119 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
2120 return NULL;
2121 }
2122
2123 return iova;
2124}
2125
2126static struct dmar_domain *
2127get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002128{
2129 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002130 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002131
2132 domain = get_domain_for_dev(pdev,
2133 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2134 if (!domain) {
2135 printk(KERN_ERR
2136 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002137 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002138 }
2139
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002140 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002141 if (unlikely(!domain_context_mapped(pdev))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142 ret = domain_context_mapping(domain, pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002143 if (ret) {
2144 printk(KERN_ERR
2145 "Domain context map for %s failed",
2146 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002147 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002148 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002149 }
2150
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002151 return domain;
2152}
2153
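/*
 * Core single-mapping path. Roughly: look up (or lazily create) the
 * device's domain, allocate an IOVA of aligned_size(paddr, size),
 * install PTEs covering the whole page range, then flush the IOTLB for
 * the new range (or the write buffer, depending on what the hardware
 * needs). A minimal usage sketch, with 'buf' and 'len' as illustrative
 * driver-side names:
 *
 *	dma_addr_t dma = intel_map_single(&pdev->dev, virt_to_bus(buf),
 *					  len, DMA_TO_DEVICE);
 *
 * Callers normally reach this through the dma_ops wrappers rather than
 * calling it directly.
 */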
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002154static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2155 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002156{
2157 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002158 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002159 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002160 struct iova *iova;
2161 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002162 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002163 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002164
2165 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002166 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002167 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002168
2169 domain = get_valid_domain_for_dev(pdev);
2170 if (!domain)
2171 return 0;
2172
Weidong Han8c11e792008-12-08 15:29:22 +08002173 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002174 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002175
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002176 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002177 if (!iova)
2178 goto error;
2179
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002180 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002181
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182 /*
2183 * Check if DMAR supports zero-length reads on write-only
2184 * mappings.
2185 */
2186 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002187 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002188 prot |= DMA_PTE_READ;
2189 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2190 prot |= DMA_PTE_WRITE;
2191 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002192 * paddr through (paddr + size) might span a partial page; we should map
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002193 * the whole page. Note: if two parts of one page are mapped separately,
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002194 * we might end up with two guest addresses mapping to the same host
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002195 * paddr, but this is not a big problem
2196 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002197 ret = domain_page_mapping(domain, start_paddr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002198 ((u64)paddr) & PAGE_MASK, size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002199 if (ret)
2200 goto error;
2201
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002202 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002203 ret = iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002204 start_paddr, size >> VTD_PAGE_SHIFT, 1);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002205 if (ret)
Weidong Han8c11e792008-12-08 15:29:22 +08002206 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002207
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002208 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002209
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002210error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002211 if (iova)
2212 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002213 printk(KERN_ERR "Device %s request: %lx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002214 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002215 return 0;
2216}
2217
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002218dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
2219 size_t size, int dir)
2220{
2221 return __intel_map_single(hwdev, paddr, size, dir,
2222 to_pci_dev(hwdev)->dma_mask);
2223}
2224
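/*
 * Deferred-unmap machinery: instead of flushing the IOTLB on every
 * unmap, freed IOVAs are parked in the per-iommu deferred_flush[]
 * tables and released in batches, either when HIGH_WATER_MARK entries
 * pile up or when the 10ms unmap_timer fires. flush_unmaps() below
 * performs the actual global IOTLB flush and frees the queued IOVAs.
 */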
mark gross5e0d2a62008-03-04 15:22:08 -08002225static void flush_unmaps(void)
2226{
mark gross80b20dd2008-04-18 13:53:58 -07002227 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002228
mark gross5e0d2a62008-03-04 15:22:08 -08002229 timer_on = 0;
2230
2231 /* just flush them all */
2232 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002233 struct intel_iommu *iommu = g_iommus[i];
2234 if (!iommu)
2235 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002236
Weidong Hana2bb8452008-12-08 11:24:12 +08002237 if (deferred_flush[i].next) {
Youquan Songa77b67d2008-10-16 16:31:56 -07002238 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2239 DMA_TLB_GLOBAL_FLUSH, 0);
mark gross80b20dd2008-04-18 13:53:58 -07002240 for (j = 0; j < deferred_flush[i].next; j++) {
2241 __free_iova(&deferred_flush[i].domain[j]->iovad,
2242 deferred_flush[i].iova[j]);
2243 }
2244 deferred_flush[i].next = 0;
2245 }
mark gross5e0d2a62008-03-04 15:22:08 -08002246 }
2247
mark gross5e0d2a62008-03-04 15:22:08 -08002248 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002249}
2250
2251static void flush_unmaps_timeout(unsigned long data)
2252{
mark gross80b20dd2008-04-18 13:53:58 -07002253 unsigned long flags;
2254
2255 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002256 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002257 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002258}
2259
2260static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2261{
2262 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002263 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002264 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002265
2266 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002267 if (list_size == HIGH_WATER_MARK)
2268 flush_unmaps();
2269
Weidong Han8c11e792008-12-08 15:29:22 +08002270 iommu = domain_get_iommu(dom);
2271 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002272
mark gross80b20dd2008-04-18 13:53:58 -07002273 next = deferred_flush[iommu_id].next;
2274 deferred_flush[iommu_id].domain[next] = dom;
2275 deferred_flush[iommu_id].iova[next] = iova;
2276 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002277
2278 if (!timer_on) {
2279 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2280 timer_on = 1;
2281 }
2282 list_size++;
2283 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2284}
2285
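/*
 * Unmap path: find the IOVA backing dev_addr, clear and free the page
 * tables for the whole aligned range, then either flush the IOTLB
 * synchronously (intel_iommu_strict) or hand the IOVA to the deferred
 * flush queue above.
 */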
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002286void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2287 int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002288{
2289 struct pci_dev *pdev = to_pci_dev(dev);
2290 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002291 unsigned long start_addr;
2292 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002293 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002294
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002295 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002296 return;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002297 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002298 BUG_ON(!domain);
2299
Weidong Han8c11e792008-12-08 15:29:22 +08002300 iommu = domain_get_iommu(domain);
2301
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002302 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2303 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002304 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002305
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002306 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002307 size = aligned_size((u64)dev_addr, size);
2308
2309 pr_debug("Device %s unmapping: %lx@%llx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002310 pci_name(pdev), size, (unsigned long long)start_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002311
2312 /* clear the whole mapped range */
2313 dma_pte_clear_range(domain, start_addr, start_addr + size);
2314 /* free page tables */
2315 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
mark gross5e0d2a62008-03-04 15:22:08 -08002316 if (intel_iommu_strict) {
Weidong Han8c11e792008-12-08 15:29:22 +08002317 if (iommu_flush_iotlb_psi(iommu,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002318 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002319 iommu_flush_write_buffer(iommu);
mark gross5e0d2a62008-03-04 15:22:08 -08002320 /* free iova */
2321 __free_iova(&domain->iovad, iova);
2322 } else {
2323 add_unmap(domain, iova);
2324 /*
2325 * queue up the release of the unmap to save the roughly 1/6 of
2326 * the cpu time otherwise spent on the per-mapping iotlb flush...
2327 */
mark gross5e0d2a62008-03-04 15:22:08 -08002328 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002329}
2330
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002331void *intel_alloc_coherent(struct device *hwdev, size_t size,
2332 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002333{
2334 void *vaddr;
2335 int order;
2336
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002337 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002338 order = get_order(size);
2339 flags &= ~(GFP_DMA | GFP_DMA32);
2340
2341 vaddr = (void *)__get_free_pages(flags, order);
2342 if (!vaddr)
2343 return NULL;
2344 memset(vaddr, 0, size);
2345
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002346 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2347 DMA_BIDIRECTIONAL,
2348 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002349 if (*dma_handle)
2350 return vaddr;
2351 free_pages((unsigned long)vaddr, order);
2352 return NULL;
2353}
2354
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002355void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2356 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002357{
2358 int order;
2359
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002360 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002361 order = get_order(size);
2362
2363 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2364 free_pages((unsigned long)vaddr, order);
2365}
2366
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002367#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002368
2369void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2370 int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002371{
2372 int i;
2373 struct pci_dev *pdev = to_pci_dev(hwdev);
2374 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002375 unsigned long start_addr;
2376 struct iova *iova;
2377 size_t size = 0;
2378 void *addr;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002379 struct scatterlist *sg;
Weidong Han8c11e792008-12-08 15:29:22 +08002380 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002381
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002382 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002383 return;
2384
2385 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002386 BUG_ON(!domain);
2387
2388 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002389
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002390 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002391 if (!iova)
2392 return;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002393 for_each_sg(sglist, sg, nelems, i) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002394 addr = SG_ENT_VIRT_ADDRESS(sg);
2395 size += aligned_size((u64)addr, sg->length);
2396 }
2397
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002398 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002399
2400 /* clear the whole mapped range */
2401 dma_pte_clear_range(domain, start_addr, start_addr + size);
2402 /* free page tables */
2403 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2404
Weidong Han8c11e792008-12-08 15:29:22 +08002405 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002406 size >> VTD_PAGE_SHIFT, 0))
Weidong Han8c11e792008-12-08 15:29:22 +08002407 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002408
2409 /* free iova */
2410 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411}
2412
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002413static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002414 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002415{
2416 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002417 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002418
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002419 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002420 BUG_ON(!sg_page(sg));
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002421 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
2422 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002423 }
2424 return nelems;
2425}
2426
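/*
 * Scatterlist mapping: a first pass sums the aligned length of every
 * segment so one contiguous IOVA block can be allocated, and a second
 * pass maps each segment into that block, filling in sg->dma_address
 * and sg->dma_length. On any mapping failure everything mapped so far
 * is torn down and 0 is returned, as the DMA API expects.
 */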
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002427int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2428 int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002429{
2430 void *addr;
2431 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002432 struct pci_dev *pdev = to_pci_dev(hwdev);
2433 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002434 size_t size = 0;
2435 int prot = 0;
2436 size_t offset = 0;
2437 struct iova *iova = NULL;
2438 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002439 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002440 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002441 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002442
2443 BUG_ON(dir == DMA_NONE);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002444 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002445 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002446
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002447 domain = get_valid_domain_for_dev(pdev);
2448 if (!domain)
2449 return 0;
2450
Weidong Han8c11e792008-12-08 15:29:22 +08002451 iommu = domain_get_iommu(domain);
2452
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002453 for_each_sg(sglist, sg, nelems, i) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002454 addr = SG_ENT_VIRT_ADDRESS(sg);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002455 addr = (void *)virt_to_phys(addr);
2456 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002457 }
2458
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002459 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002460 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002461 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002462 return 0;
2463 }
2464
2465 /*
2466 * Check if DMAR supports zero-length reads on write-only
2467 * mappings.
2468 */
2469 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002470 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002471 prot |= DMA_PTE_READ;
2472 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2473 prot |= DMA_PTE_WRITE;
2474
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002475 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002476 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002477 for_each_sg(sglist, sg, nelems, i) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002478 addr = SG_ENT_VIRT_ADDRESS(sg);
2479 addr = (void *)virt_to_phys(addr);
2480 size = aligned_size((u64)addr, sg->length);
2481 ret = domain_page_mapping(domain, start_addr + offset,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002482 ((u64)addr) & PAGE_MASK,
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002483 size, prot);
2484 if (ret) {
2485 /* clear whatever has been mapped so far */
2486 dma_pte_clear_range(domain, start_addr,
2487 start_addr + offset);
2488 /* free page tables */
2489 dma_pte_free_pagetable(domain, start_addr,
2490 start_addr + offset);
2491 /* free iova */
2492 __free_iova(&domain->iovad, iova);
2493 return 0;
2494 }
2495 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002496 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002497 sg->dma_length = sg->length;
2498 offset += size;
2499 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002500
2501 /* it's a non-present to present mapping */
Weidong Han8c11e792008-12-08 15:29:22 +08002502 if (iommu_flush_iotlb_psi(iommu, domain->id,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002503 start_addr, offset >> VTD_PAGE_SHIFT, 1))
Weidong Han8c11e792008-12-08 15:29:22 +08002504 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002505 return nelems;
2506}
2507
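/*
 * intel_dma_ops below is what gets installed as the global dma_ops in
 * intel_iommu_init(), so an ordinary driver call such as the following
 * sketch (driver-side names are illustrative):
 *
 *	dma_addr_t h = dma_map_single(&pdev->dev, skb->data, len,
 *				      DMA_TO_DEVICE);
 *
 * ends up in intel_map_single() and friends once the IOMMU is enabled.
 */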
2508static struct dma_mapping_ops intel_dma_ops = {
2509 .alloc_coherent = intel_alloc_coherent,
2510 .free_coherent = intel_free_coherent,
2511 .map_single = intel_map_single,
2512 .unmap_single = intel_unmap_single,
2513 .map_sg = intel_map_sg,
2514 .unmap_sg = intel_unmap_sg,
2515};
2516
2517static inline int iommu_domain_cache_init(void)
2518{
2519 int ret = 0;
2520
2521 iommu_domain_cache = kmem_cache_create("iommu_domain",
2522 sizeof(struct dmar_domain),
2523 0,
2524 SLAB_HWCACHE_ALIGN,
2526 NULL);
2527 if (!iommu_domain_cache) {
2528 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2529 ret = -ENOMEM;
2530 }
2531
2532 return ret;
2533}
2534
2535static inline int iommu_devinfo_cache_init(void)
2536{
2537 int ret = 0;
2538
2539 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2540 sizeof(struct device_domain_info),
2541 0,
2542 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002543 NULL);
2544 if (!iommu_devinfo_cache) {
2545 printk(KERN_ERR "Couldn't create devinfo cache\n");
2546 ret = -ENOMEM;
2547 }
2548
2549 return ret;
2550}
2551
2552static inline int iommu_iova_cache_init(void)
2553{
2554 int ret = 0;
2555
2556 iommu_iova_cache = kmem_cache_create("iommu_iova",
2557 sizeof(struct iova),
2558 0,
2559 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002560 NULL);
2561 if (!iommu_iova_cache) {
2562 printk(KERN_ERR "Couldn't create iova cache\n");
2563 ret = -ENOMEM;
2564 }
2565
2566 return ret;
2567}
2568
2569static int __init iommu_init_mempool(void)
2570{
2571 int ret;
2572 ret = iommu_iova_cache_init();
2573 if (ret)
2574 return ret;
2575
2576 ret = iommu_domain_cache_init();
2577 if (ret)
2578 goto domain_error;
2579
2580 ret = iommu_devinfo_cache_init();
2581 if (!ret)
2582 return ret;
2583
2584 kmem_cache_destroy(iommu_domain_cache);
2585domain_error:
2586 kmem_cache_destroy(iommu_iova_cache);
2587
2588 return -ENOMEM;
2589}
2590
2591static void __init iommu_exit_mempool(void)
2592{
2593 kmem_cache_destroy(iommu_devinfo_cache);
2594 kmem_cache_destroy(iommu_domain_cache);
2595 kmem_cache_destroy(iommu_iova_cache);
2596
2597}
2598
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002599static void __init init_no_remapping_devices(void)
2600{
2601 struct dmar_drhd_unit *drhd;
2602
2603 for_each_drhd_unit(drhd) {
2604 if (!drhd->include_all) {
2605 int i;
2606 for (i = 0; i < drhd->devices_cnt; i++)
2607 if (drhd->devices[i] != NULL)
2608 break;
2609 /* ignore DMAR unit if no pci devices exist */
2610 if (i == drhd->devices_cnt)
2611 drhd->ignored = 1;
2612 }
2613 }
2614
2615 if (dmar_map_gfx)
2616 return;
2617
2618 for_each_drhd_unit(drhd) {
2619 int i;
2620 if (drhd->ignored || drhd->include_all)
2621 continue;
2622
2623 for (i = 0; i < drhd->devices_cnt; i++)
2624 if (drhd->devices[i] &&
2625 !IS_GFX_DEVICE(drhd->devices[i]))
2626 break;
2627
2628 if (i < drhd->devices_cnt)
2629 continue;
2630
2631 /* bypass IOMMU if it is just for gfx devices */
2632 drhd->ignored = 1;
2633 for (i = 0; i < drhd->devices_cnt; i++) {
2634 if (!drhd->devices[i])
2635 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002636 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002637 }
2638 }
2639}
2640
2641int __init intel_iommu_init(void)
2642{
2643 int ret = 0;
2644
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002645 if (dmar_table_init())
2646 return -ENODEV;
2647
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002648 if (dmar_dev_scope_init())
2649 return -ENODEV;
2650
Suresh Siddha2ae21012008-07-10 11:16:43 -07002651 /*
2652 * Check the need for DMA-remapping initialization now.
2653 * Above initialization will also be used by Interrupt-remapping.
2654 */
2655 if (no_iommu || swiotlb || dmar_disabled)
2656 return -ENODEV;
2657
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002658 iommu_init_mempool();
2659 dmar_init_reserved_ranges();
2660
2661 init_no_remapping_devices();
2662
2663 ret = init_dmars();
2664 if (ret) {
2665 printk(KERN_ERR "IOMMU: dmar init failed\n");
2666 put_iova_domain(&reserved_iova_list);
2667 iommu_exit_mempool();
2668 return ret;
2669 }
2670 printk(KERN_INFO
2671 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2672
mark gross5e0d2a62008-03-04 15:22:08 -08002673 init_timer(&unmap_timer);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002674 force_iommu = 1;
2675 dma_ops = &intel_dma_ops;
2676 return 0;
2677}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002678
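/*
 * The vm_domain_* helpers below manage device<->domain bindings for
 * domains that appear intended for the exported intel_iommu_* API
 * (e.g. assigning a device to a guest). They mirror the internal
 * device_domain_info handling but also keep the domain's iommu_bmp,
 * iommu_count and coherency state in sync.
 */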
Weidong Hanc7151a82008-12-08 22:51:37 +08002679static int vm_domain_add_dev_info(struct dmar_domain *domain,
2680 struct pci_dev *pdev)
2681{
2682 struct device_domain_info *info;
2683 unsigned long flags;
2684
2685 info = alloc_devinfo_mem();
2686 if (!info)
2687 return -ENOMEM;
2688
2689 info->bus = pdev->bus->number;
2690 info->devfn = pdev->devfn;
2691 info->dev = pdev;
2692 info->domain = domain;
2693
2694 spin_lock_irqsave(&device_domain_lock, flags);
2695 list_add(&info->link, &domain->devices);
2696 list_add(&info->global, &device_domain_list);
2697 pdev->dev.archdata.iommu = info;
2698 spin_unlock_irqrestore(&device_domain_lock, flags);
2699
2700 return 0;
2701}
2702
2703static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2704 struct pci_dev *pdev)
2705{
2706 struct device_domain_info *info;
2707 struct intel_iommu *iommu;
2708 unsigned long flags;
2709 int found = 0;
2710 struct list_head *entry, *tmp;
2711
2712 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
2713 if (!iommu)
2714 return;
2715
2716 spin_lock_irqsave(&device_domain_lock, flags);
2717 list_for_each_safe(entry, tmp, &domain->devices) {
2718 info = list_entry(entry, struct device_domain_info, link);
2719 if (info->bus == pdev->bus->number &&
2720 info->devfn == pdev->devfn) {
2721 list_del(&info->link);
2722 list_del(&info->global);
2723 if (info->dev)
2724 info->dev->dev.archdata.iommu = NULL;
2725 spin_unlock_irqrestore(&device_domain_lock, flags);
2726
2727 iommu_detach_dev(iommu, info->bus, info->devfn);
2728 free_devinfo_mem(info);
2729
2730 spin_lock_irqsave(&device_domain_lock, flags);
2731
2732 if (found)
2733 break;
2734 else
2735 continue;
2736 }
2737
2738 /* if there are no other devices under the same iommu
2739 * owned by this domain, clear this iommu in iommu_bmp and
2740 * update the iommu count and coherency
2741 */
2742 if (device_to_iommu(info->bus, info->devfn) == iommu)
2743 found = 1;
2744 }
2745
2746 if (found == 0) {
2747 unsigned long tmp_flags;
2748 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2749 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2750 domain->iommu_count--;
2751 domain_update_iommu_coherency(domain);
2752 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2753 }
2754
2755 spin_unlock_irqrestore(&device_domain_lock, flags);
2756}
2757
2758static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2759{
2760 struct device_domain_info *info;
2761 struct intel_iommu *iommu;
2762 unsigned long flags1, flags2;
2763
2764 spin_lock_irqsave(&device_domain_lock, flags1);
2765 while (!list_empty(&domain->devices)) {
2766 info = list_entry(domain->devices.next,
2767 struct device_domain_info, link);
2768 list_del(&info->link);
2769 list_del(&info->global);
2770 if (info->dev)
2771 info->dev->dev.archdata.iommu = NULL;
2772
2773 spin_unlock_irqrestore(&device_domain_lock, flags1);
2774
2775 iommu = device_to_iommu(info->bus, info->devfn);
2776 iommu_detach_dev(iommu, info->bus, info->devfn);
2777
2778 /* clear this iommu in iommu_bmp, update iommu count
2779 * and coherency
2780 */
2781 spin_lock_irqsave(&domain->iommu_lock, flags2);
2782 if (test_and_clear_bit(iommu->seq_id,
2783 &domain->iommu_bmp)) {
2784 domain->iommu_count--;
2785 domain_update_iommu_coherency(domain);
2786 }
2787 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2788
2789 free_devinfo_mem(info);
2790 spin_lock_irqsave(&device_domain_lock, flags1);
2791 }
2792 spin_unlock_irqrestore(&device_domain_lock, flags1);
2793}
2794
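/*
 * Exported domain API. A typical caller-side sequence, sketched with
 * hypothetical iova/hpa/size values, would be:
 *
 *	domain = intel_iommu_domain_alloc(pdev);
 *	intel_iommu_context_mapping(domain, pdev);
 *	intel_iommu_page_mapping(domain, iova, hpa, size,
 *				 DMA_PTE_READ | DMA_PTE_WRITE);
 *	...
 *	intel_iommu_detach_dev(domain, pdev->bus->number, pdev->devfn);
 *	intel_iommu_domain_exit(domain);
 */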
Kay, Allen M38717942008-09-09 18:37:29 +03002795void intel_iommu_domain_exit(struct dmar_domain *domain)
2796{
2797 u64 end;
2798
2799 /* Domain 0 is reserved, so don't process it */
2800 if (!domain)
2801 return;
2802
2803 end = DOMAIN_MAX_ADDR(domain->gaw);
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002804 end = end & (~VTD_PAGE_MASK);
Kay, Allen M38717942008-09-09 18:37:29 +03002805
2806 /* clear ptes */
2807 dma_pte_clear_range(domain, 0, end);
2808
2809 /* free page tables */
2810 dma_pte_free_pagetable(domain, 0, end);
2811
2812 iommu_free_domain(domain);
2813 free_domain_mem(domain);
2814}
2815EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
2816
2817struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
2818{
2819 struct dmar_drhd_unit *drhd;
2820 struct dmar_domain *domain;
2821 struct intel_iommu *iommu;
2822
2823 drhd = dmar_find_matched_drhd_unit(pdev);
2824 if (!drhd) {
2825 printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
2826 return NULL;
2827 }
2828
2829 iommu = drhd->iommu;
2830 if (!iommu) {
2831 printk(KERN_ERR
2832 "intel_iommu_domain_alloc: iommu == NULL\n");
2833 return NULL;
2834 }
2835 domain = iommu_alloc_domain(iommu);
2836 if (!domain) {
2837 printk(KERN_ERR
2838 "intel_iommu_domain_alloc: domain == NULL\n");
2839 return NULL;
2840 }
2841 if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2842 printk(KERN_ERR
2843 "intel_iommu_domain_alloc: domain_init() failed\n");
2844 intel_iommu_domain_exit(domain);
2845 return NULL;
2846 }
2847 return domain;
2848}
2849EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
2850
2851int intel_iommu_context_mapping(
2852 struct dmar_domain *domain, struct pci_dev *pdev)
2853{
2854 int rc;
2855 rc = domain_context_mapping(domain, pdev);
2856 return rc;
2857}
2858EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
2859
2860int intel_iommu_page_mapping(
2861 struct dmar_domain *domain, dma_addr_t iova,
2862 u64 hpa, size_t size, int prot)
2863{
2864 int rc;
2865 rc = domain_page_mapping(domain, iova, hpa, size, prot);
2866 return rc;
2867}
2868EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
2869
2870void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
2871{
Weidong Hanc7151a82008-12-08 22:51:37 +08002872 struct intel_iommu *iommu;
2873
2874 iommu = device_to_iommu(bus, devfn);
2875 iommu_detach_dev(iommu, bus, devfn);
Kay, Allen M38717942008-09-09 18:37:29 +03002876}
2877EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
2878
2879struct dmar_domain *
2880intel_iommu_find_domain(struct pci_dev *pdev)
2881{
2882 return find_domain(pdev);
2883}
2884EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
2885
2886int intel_iommu_found(void)
2887{
2888 return g_num_of_iommus;
2889}
2890EXPORT_SYMBOL_GPL(intel_iommu_found);
2891
2892u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
2893{
2894 struct dma_pte *pte;
2895 u64 pfn;
2896
2897 pfn = 0;
2898 pte = addr_to_dma_pte(domain, iova);
2899
2900 if (pte)
Mark McLoughlin19c239c2008-11-21 16:56:53 +00002901 pfn = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03002902
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002903 return pfn >> VTD_PAGE_SHIFT;
Kay, Allen M38717942008-09-09 18:37:29 +03002904}
2905EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);