/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include <asm/e820.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

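/*
 * For illustration (editorial note, not from the original source): with
 * 4KiB pages, IOVA_PFN(DMA_BIT_MASK(32)) is 0xffffffff >> 12 == 0xfffff,
 * so DMA_32BIT_PFN is the last page frame reachable through a 32-bit
 * DMA mask.
 */
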
#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

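/*
 * For illustration (editorial sketch): on a DMA transaction the hardware
 * uses the requester's bus number to index the root table, the devfn to
 * index that bus's context table, and the context entry's address space
 * root to find the domain's page tables:
 *
 *	root_entry[bus] -> context_entry[devfn] -> dma_pte hierarchy
 */
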
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

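/*
 * For illustration (editorial note): a last-level PTE mapping host page
 * 0x12345000 read/write is built with dma_set_pte_addr() followed by
 * dma_set_pte_readable() and dma_set_pte_writable(), yielding
 * val = 0x12345000 | DMA_PTE_READ | DMA_PTE_WRITE == 0x12345003.
 */
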
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency; /* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

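/*
 * Example usage of the option parsed above (editorial note), on the
 * kernel command line:
 *
 *	intel_iommu=on,igfx_off,strict
 *
 * enables the IOMMU, skips mapping of the graphics device, and disables
 * batched IOTLB flushing.
 */
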
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

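/*
 * Worked example for the helpers above (editorial note): the default
 * 48-bit address width gives width_to_agaw(48) = (48 - 30) / 9 = 2,
 * hence agaw_to_level(2) = 4, i.e. a 4-level page table, and
 * agaw_to_width(2) = 30 + 2 * 9 = 48 bits again.
 */
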
static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}

static struct dma_pte *addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * Higher-level tables are always set r/w; the
			 * last-level page table controls read/write.
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level ptes; should be followed by a TLB flush */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's a partial page */
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need a lock here; nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700910/* return value determine if we need a write buffer flush */
David Woodhouse1f0ef2a2009-05-10 19:58:49 +0100911static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
912 u64 addr, unsigned int size_order, u64 type)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700913{
914 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
915 u64 val = 0, val_iva = 0;
916 unsigned long flag;
917
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700918 switch (type) {
919 case DMA_TLB_GLOBAL_FLUSH:
920 /* global flush doesn't need set IVA_REG */
921 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
922 break;
923 case DMA_TLB_DSI_FLUSH:
924 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
925 break;
926 case DMA_TLB_PSI_FLUSH:
927 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
928 /* Note: always flush non-leaf currently */
929 val_iva = size_order | addr;
930 break;
931 default:
932 BUG();
933 }
934 /* Note: set drain read/write */
935#if 0
936 /*
937 * This is probably to be super secure.. Looks like we can
938 * ignore it without any impact.
939 */
940 if (cap_read_drain(iommu->cap))
941 val |= DMA_TLB_READ_DRAIN;
942#endif
943 if (cap_write_drain(iommu->cap))
944 val |= DMA_TLB_WRITE_DRAIN;
945
946 spin_lock_irqsave(&iommu->register_lock, flag);
947 /* Note: Only uses first TLB reg currently */
948 if (val_iva)
949 dmar_writeq(iommu->reg + tlb_offset, val_iva);
950 dmar_writeq(iommu->reg + tlb_offset + 8, val);
951
952 /* Make sure hardware complete it */
953 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
954 dmar_readq, (!(val & DMA_TLB_IVT)), val);
955
956 spin_unlock_irqrestore(&iommu->register_lock, flag);
957
958 /* check IOTLB invalidation granularity */
959 if (DMA_TLB_IAIG(val) == 0)
960 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
961 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
962 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -0700963 (unsigned long long)DMA_TLB_IIRG(type),
964 (unsigned long long)DMA_TLB_IAIG(val));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700965}
966
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/*
	 * Fall back to a domain-selective flush if there is no PSI
	 * support or the size is too big.
	 * PSI requires the page count to be a power of two, and the base
	 * address to be naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);
	if (did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

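/*
 * For illustration (editorial note): flushing 9 pages rounds up to 16,
 * so mask = ilog2(16) = 4 and the hardware invalidates one naturally
 * aligned 16-page (64KiB) region; if that mask exceeds the IOMMU's
 * maximum address mask value, the code above falls back to a
 * domain-selective flush instead.
 */
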
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PHYSICAL_PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

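/*
 * Worked example (editorial note): for a guest width of 36 bits,
 * r = (36 - 12) % 9 = 6, so the width is rounded up to 36 + 9 - 6 = 39,
 * the next width w for which (w - 12) is a whole number of 9-bit
 * page-table levels (e.g. 30, 39, 48, 57), capped at 64.
 */
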
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

1546static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001547domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1548 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001549{
1550 int ret;
1551 struct pci_dev *tmp, *parent;
1552
David Woodhouse276dbf92009-04-04 01:45:37 +01001553 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001554 pdev->bus->number, pdev->devfn,
1555 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001556 if (ret)
1557 return ret;
1558
1559 /* dependent device mapping */
1560 tmp = pci_find_upstream_pcie_bridge(pdev);
1561 if (!tmp)
1562 return 0;
1563 /* Secondary interface's bus number and devfn 0 */
1564 parent = pdev->bus->self;
1565 while (parent != tmp) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001566 ret = domain_context_mapping_one(domain,
1567 pci_domain_nr(parent->bus),
1568 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001569 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001570 if (ret)
1571 return ret;
1572 parent = parent->bus->self;
1573 }
1574 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1575 return domain_context_mapping_one(domain,
David Woodhouse276dbf92009-04-04 01:45:37 +01001576 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001577 tmp->subordinate->number, 0,
1578 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001579 else /* this is a legacy PCI bridge */
1580 return domain_context_mapping_one(domain,
David Woodhouse276dbf92009-04-04 01:45:37 +01001581 pci_domain_nr(tmp->bus),
1582 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001583 tmp->devfn,
1584 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001585}
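/*
 * Illustrative sketch, not from the original source: for a topology such as
 *
 *   root port --- PCIe-to-PCI bridge (tmp) --- legacy PCI bus --- pdev
 *
 * domain_context_mapping() programs a context entry for pdev itself, one
 * for every bridge between pdev and tmp, and finally one for
 * (tmp->subordinate->number, devfn 0), because DMA from devices behind a
 * conventional PCI bridge is tagged with the bridge's IDs rather than the
 * device's own.
 */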
1586
Weidong Han5331fe62008-12-08 23:00:00 +08001587static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001588{
1589 int ret;
1590 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001591 struct intel_iommu *iommu;
1592
David Woodhouse276dbf92009-04-04 01:45:37 +01001593 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1594 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001595 if (!iommu)
1596 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001597
David Woodhouse276dbf92009-04-04 01:45:37 +01001598 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001599 if (!ret)
1600 return ret;
1601 /* dependent device mapping */
1602 tmp = pci_find_upstream_pcie_bridge(pdev);
1603 if (!tmp)
1604 return ret;
1605 /* Secondary interface's bus number and devfn 0 */
1606 parent = pdev->bus->self;
1607 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001608 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf92009-04-04 01:45:37 +01001609 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001610 if (!ret)
1611 return ret;
1612 parent = parent->bus->self;
1613 }
1614 if (tmp->is_pcie)
David Woodhouse276dbf92009-04-04 01:45:37 +01001615 return device_context_mapped(iommu, tmp->subordinate->number,
1616 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001617 else
David Woodhouse276dbf92009-04-04 01:45:37 +01001618 return device_context_mapped(iommu, tmp->bus->number,
1619 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001620}
1621
1622static int
1623domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1624 u64 hpa, size_t size, int prot)
1625{
1626 u64 start_pfn, end_pfn;
1627 struct dma_pte *pte;
1628 int index;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001629 int addr_width = agaw_to_width(domain->agaw);
1630
1631 hpa &= (((u64)1) << addr_width) - 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001632
1633 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1634 return -EINVAL;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001635 iova &= PAGE_MASK;
1636 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1637 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001638 index = 0;
1639 while (start_pfn < end_pfn) {
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001640 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001641 if (!pte)
1642 return -ENOMEM;
 1643		/* We don't need a lock here; nobody else
 1644		 * touches this iova range
1645 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001646 BUG_ON(dma_pte_addr(pte));
1647 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1648 dma_set_pte_prot(pte, prot);
Sheng Yang9cf06692009-03-18 15:33:07 +08001649 if (prot & DMA_PTE_SNP)
1650 dma_set_pte_snp(pte);
Weidong Han5331fe62008-12-08 23:00:00 +08001651 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001652 start_pfn++;
1653 index++;
1654 }
1655 return 0;
1656}
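/*
 * Illustrative sketch, not from the original source: a hypothetical caller
 * of domain_page_mapping() that identity-maps a single VT-d page
 * (iova == hpa).  The "example_" name is made up for illustration.
 */
static int __maybe_unused
example_identity_map_page(struct dmar_domain *domain, u64 paddr)
{
	u64 page = paddr & VTD_PAGE_MASK;

	/* map one read/write page with iova equal to the physical address */
	return domain_page_mapping(domain, page, page, VTD_PAGE_SIZE,
				   DMA_PTE_READ | DMA_PTE_WRITE);
}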
1657
Weidong Hanc7151a82008-12-08 22:51:37 +08001658static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001659{
Weidong Hanc7151a82008-12-08 22:51:37 +08001660 if (!iommu)
1661 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001662
1663 clear_context_table(iommu, bus, devfn);
1664 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001665 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001666 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001667}
1668
1669static void domain_remove_dev_info(struct dmar_domain *domain)
1670{
1671 struct device_domain_info *info;
1672 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001673 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001674
1675 spin_lock_irqsave(&device_domain_lock, flags);
1676 while (!list_empty(&domain->devices)) {
1677 info = list_entry(domain->devices.next,
1678 struct device_domain_info, link);
1679 list_del(&info->link);
1680 list_del(&info->global);
1681 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001682 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001683 spin_unlock_irqrestore(&device_domain_lock, flags);
1684
Yu Zhao93a23a72009-05-18 13:51:37 +08001685 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf92009-04-04 01:45:37 +01001686 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001687 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001688 free_devinfo_mem(info);
1689
1690 spin_lock_irqsave(&device_domain_lock, flags);
1691 }
1692 spin_unlock_irqrestore(&device_domain_lock, flags);
1693}
1694
1695/*
1696 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001697 * Note: struct pci_dev->dev.archdata.iommu stores the domain info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001698 */
Kay, Allen M38717942008-09-09 18:37:29 +03001699static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001700find_domain(struct pci_dev *pdev)
1701{
1702 struct device_domain_info *info;
1703
 1704	/* No lock here; we assume no domain exits in the normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001705 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001706 if (info)
1707 return info->domain;
1708 return NULL;
1709}
1710
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711/* domain is initialized */
1712static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1713{
1714 struct dmar_domain *domain, *found = NULL;
1715 struct intel_iommu *iommu;
1716 struct dmar_drhd_unit *drhd;
1717 struct device_domain_info *info, *tmp;
1718 struct pci_dev *dev_tmp;
1719 unsigned long flags;
1720 int bus = 0, devfn = 0;
David Woodhouse276dbf92009-04-04 01:45:37 +01001721 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001722 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001723
1724 domain = find_domain(pdev);
1725 if (domain)
1726 return domain;
1727
David Woodhouse276dbf92009-04-04 01:45:37 +01001728 segment = pci_domain_nr(pdev->bus);
1729
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001730 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1731 if (dev_tmp) {
1732 if (dev_tmp->is_pcie) {
1733 bus = dev_tmp->subordinate->number;
1734 devfn = 0;
1735 } else {
1736 bus = dev_tmp->bus->number;
1737 devfn = dev_tmp->devfn;
1738 }
1739 spin_lock_irqsave(&device_domain_lock, flags);
1740 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001741 if (info->segment == segment &&
1742 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001743 found = info->domain;
1744 break;
1745 }
1746 }
1747 spin_unlock_irqrestore(&device_domain_lock, flags);
 1748		/* pcie-pci bridge already has a domain, use it */
1749 if (found) {
1750 domain = found;
1751 goto found_domain;
1752 }
1753 }
1754
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001755 domain = alloc_domain();
1756 if (!domain)
1757 goto error;
1758
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001759 /* Allocate new domain for the device */
1760 drhd = dmar_find_matched_drhd_unit(pdev);
1761 if (!drhd) {
1762 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1763 pci_name(pdev));
1764 return NULL;
1765 }
1766 iommu = drhd->iommu;
1767
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001768 ret = iommu_attach_domain(domain, iommu);
1769 if (ret) {
1770 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001771 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001772 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001773
1774 if (domain_init(domain, gaw)) {
1775 domain_exit(domain);
1776 goto error;
1777 }
1778
1779 /* register pcie-to-pci device */
1780 if (dev_tmp) {
1781 info = alloc_devinfo_mem();
1782 if (!info) {
1783 domain_exit(domain);
1784 goto error;
1785 }
David Woodhouse276dbf92009-04-04 01:45:37 +01001786 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001787 info->bus = bus;
1788 info->devfn = devfn;
1789 info->dev = NULL;
1790 info->domain = domain;
1791 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001792 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001793
 1794		/* pcie-to-pci bridge already has a domain, use it */
1795 found = NULL;
1796 spin_lock_irqsave(&device_domain_lock, flags);
1797 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf92009-04-04 01:45:37 +01001798 if (tmp->segment == segment &&
1799 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001800 found = tmp->domain;
1801 break;
1802 }
1803 }
1804 if (found) {
1805 free_devinfo_mem(info);
1806 domain_exit(domain);
1807 domain = found;
1808 } else {
1809 list_add(&info->link, &domain->devices);
1810 list_add(&info->global, &device_domain_list);
1811 }
1812 spin_unlock_irqrestore(&device_domain_lock, flags);
1813 }
1814
1815found_domain:
1816 info = alloc_devinfo_mem();
1817 if (!info)
1818 goto error;
David Woodhouse276dbf92009-04-04 01:45:37 +01001819 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001820 info->bus = pdev->bus->number;
1821 info->devfn = pdev->devfn;
1822 info->dev = pdev;
1823 info->domain = domain;
1824 spin_lock_irqsave(&device_domain_lock, flags);
 1825	/* somebody else was faster and already set it */
1826 found = find_domain(pdev);
1827 if (found != NULL) {
1828 spin_unlock_irqrestore(&device_domain_lock, flags);
1829 if (found != domain) {
1830 domain_exit(domain);
1831 domain = found;
1832 }
1833 free_devinfo_mem(info);
1834 return domain;
1835 }
1836 list_add(&info->link, &domain->devices);
1837 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001838 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001839 spin_unlock_irqrestore(&device_domain_lock, flags);
1840 return domain;
1841error:
 1842	/* recheck it here; someone else may have set it */
1843 return find_domain(pdev);
1844}
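/*
 * Editorial note, not from the original source: the find_domain() recheck
 * under device_domain_lock above closes a race where two threads build a
 * domain for the same device concurrently; the loser calls domain_exit()
 * on its fresh domain and adopts the winner's, so
 * pdev->dev.archdata.iommu is only ever published once.
 */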
1845
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001846static int iommu_identity_mapping;
1847
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001848static int iommu_prepare_identity_map(struct pci_dev *pdev,
1849 unsigned long long start,
1850 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001851{
1852 struct dmar_domain *domain;
1853 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001854 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001855 int ret;
1856
1857 printk(KERN_INFO
1858 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1859 pci_name(pdev), start, end);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001860 if (iommu_identity_mapping)
1861 domain = si_domain;
1862 else
1863 /* page table init */
1864 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001865 if (!domain)
1866 return -ENOMEM;
1867
1868 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001869 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001870 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001871 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001872 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1873 IOVA_PFN(base + size) - 1)) {
1874 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1875 ret = -ENOMEM;
1876 goto error;
1877 }
1878
1879 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1880 size, base, pci_name(pdev));
1881 /*
 1882	 * The RMRR range might overlap the physical memory range;
 1883	 * clear it first.
1884 */
1885 dma_pte_clear_range(domain, base, base + size);
1886
1887 ret = domain_page_mapping(domain, base, base, size,
1888 DMA_PTE_READ|DMA_PTE_WRITE);
1889 if (ret)
1890 goto error;
1891
1892 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001893 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001894 if (!ret)
1895 return 0;
1896error:
1897 domain_exit(domain);
1898 return ret;
1899
1900}
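/*
 * Worked example with hypothetical numbers, not from the original source:
 * for an RMRR covering [0xcd000, 0xcffff], the caller passes
 * start = 0xcd000 and end = rmrr->end_address + 1 = 0xd0000, giving
 * base = 0xcd000 and size = PAGE_ALIGN(0x3000) = 0x3000; IOVA pfns
 * 0xcd..0xcf are reserved and identity-mapped read/write.
 */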
1901
1902static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1903 struct pci_dev *pdev)
1904{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001905 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001906 return 0;
1907 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1908 rmrr->end_address + 1);
1909}
1910
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001911#ifdef CONFIG_DMAR_GFX_WA
Yinghai Lud52d53b2008-06-16 20:10:55 -07001912struct iommu_prepare_data {
1913 struct pci_dev *pdev;
1914 int ret;
1915};
1916
1917static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1918 unsigned long end_pfn, void *datax)
1919{
1920 struct iommu_prepare_data *data;
1921
1922 data = (struct iommu_prepare_data *)datax;
1923
1924 data->ret = iommu_prepare_identity_map(data->pdev,
1925 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1926 return data->ret;
1927
1928}
1929
1930static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1931{
1932 int nid;
1933 struct iommu_prepare_data data;
1934
1935 data.pdev = pdev;
1936 data.ret = 0;
1937
1938 for_each_online_node(nid) {
1939 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1940 if (data.ret)
1941 return data.ret;
1942 }
1943 return data.ret;
1944}
1945
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001946static void __init iommu_prepare_gfx_mapping(void)
1947{
1948 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001949 int ret;
1950
1951 for_each_pci_dev(pdev) {
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001952 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001953 !IS_GFX_DEVICE(pdev))
1954 continue;
1955 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1956 pci_name(pdev));
Yinghai Lud52d53b2008-06-16 20:10:55 -07001957 ret = iommu_prepare_with_active_regions(pdev);
1958 if (ret)
1959 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001960 }
1961}
Mark McLoughlin2abd7e12008-11-20 15:49:50 +00001962#else /* !CONFIG_DMAR_GFX_WA */
1963static inline void iommu_prepare_gfx_mapping(void)
1964{
1965 return;
1966}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001967#endif
1968
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001969#ifdef CONFIG_DMAR_FLOPPY_WA
1970static inline void iommu_prepare_isa(void)
1971{
1972 struct pci_dev *pdev;
1973 int ret;
1974
1975 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1976 if (!pdev)
1977 return;
1978
1979 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1980 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1981
1982 if (ret)
Frank Seidel1c35b8e2009-02-06 10:23:36 +01001983		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001984 "floppy might not work\n");
1985
1986}
1987#else
1988static inline void iommu_prepare_isa(void)
1989{
1990 return;
1991}
 1992#endif /* !CONFIG_DMAR_FLOPPY_WA */
1993
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001994/* Initialize each context entry as pass through. */
1995static int __init init_context_pass_through(void)
1996{
1997 struct pci_dev *pdev = NULL;
1998 struct dmar_domain *domain;
1999 int ret;
2000
2001 for_each_pci_dev(pdev) {
2002 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2003 ret = domain_context_mapping(domain, pdev,
2004 CONTEXT_TT_PASS_THROUGH);
2005 if (ret)
2006 return ret;
2007 }
2008 return 0;
2009}
2010
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002011static int md_domain_init(struct dmar_domain *domain, int guest_width);
2012static int si_domain_init(void)
2013{
2014 struct dmar_drhd_unit *drhd;
2015 struct intel_iommu *iommu;
2016 int ret = 0;
2017
2018 si_domain = alloc_domain();
2019 if (!si_domain)
2020 return -EFAULT;
2021
2022
2023 for_each_active_iommu(iommu, drhd) {
2024 ret = iommu_attach_domain(si_domain, iommu);
2025 if (ret) {
2026 domain_exit(si_domain);
2027 return -EFAULT;
2028 }
2029 }
2030
2031 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2032 domain_exit(si_domain);
2033 return -EFAULT;
2034 }
2035
2036 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2037
2038 return 0;
2039}
2040
2041static void domain_remove_one_dev_info(struct dmar_domain *domain,
2042 struct pci_dev *pdev);
2043static int identity_mapping(struct pci_dev *pdev)
2044{
2045 struct device_domain_info *info;
2046
2047 if (likely(!iommu_identity_mapping))
2048 return 0;
2049
2050
2051 list_for_each_entry(info, &si_domain->devices, link)
2052 if (info->dev == pdev)
2053 return 1;
2054 return 0;
2055}
2056
2057static int domain_add_dev_info(struct dmar_domain *domain,
2058 struct pci_dev *pdev)
2059{
2060 struct device_domain_info *info;
2061 unsigned long flags;
2062
2063 info = alloc_devinfo_mem();
2064 if (!info)
2065 return -ENOMEM;
2066
2067 info->segment = pci_domain_nr(pdev->bus);
2068 info->bus = pdev->bus->number;
2069 info->devfn = pdev->devfn;
2070 info->dev = pdev;
2071 info->domain = domain;
2072
2073 spin_lock_irqsave(&device_domain_lock, flags);
2074 list_add(&info->link, &domain->devices);
2075 list_add(&info->global, &device_domain_list);
2076 pdev->dev.archdata.iommu = info;
2077 spin_unlock_irqrestore(&device_domain_lock, flags);
2078
2079 return 0;
2080}
2081
2082static int iommu_prepare_static_identity_mapping(void)
2083{
2084 int i;
2085 struct pci_dev *pdev = NULL;
2086 int ret;
2087
2088 ret = si_domain_init();
2089 if (ret)
2090 return -EFAULT;
2091
2092 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2093 for_each_pci_dev(pdev) {
2094 for (i = 0; i < e820.nr_map; i++) {
2095 struct e820entry *ei = &e820.map[i];
2096
2097 if (ei->type == E820_RAM) {
2098 ret = iommu_prepare_identity_map(pdev,
2099 ei->addr, ei->addr + ei->size);
2100 if (ret) {
 2101				printk(KERN_ERR "1:1 mapping to one domain failed.\n");
2102 return -EFAULT;
2103 }
2104 }
2105 }
2106 ret = domain_add_dev_info(si_domain, pdev);
2107 if (ret)
2108 return ret;
2109 }
2110
2111 return 0;
2112}
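/*
 * Illustrative example, not from the original source: with a typical e820
 * layout such as
 *
 *   [0x00000000 - 0x0009ffff] E820_RAM
 *   [0x00100000 - 0x7fffffff] E820_RAM
 *
 * every PCI device gets both RAM ranges identity-mapped into the single
 * shared si_domain and is then attached to it via domain_add_dev_info().
 */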
2113
2114int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002115{
2116 struct dmar_drhd_unit *drhd;
2117 struct dmar_rmrr_unit *rmrr;
2118 struct pci_dev *pdev;
2119 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002120 int i, ret;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002121 int pass_through = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002122
2123 /*
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002124	 * If pass through cannot be enabled, the iommu falls back to identity
 2125	 * mapping.
2126 */
2127 if (iommu_pass_through)
2128 iommu_identity_mapping = 1;
2129
2130 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002131 * for each drhd
2132 * allocate root
2133 * initialize and program root entry to not present
2134 * endfor
2135 */
2136 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002137 g_num_of_iommus++;
2138 /*
 2139		 * lock not needed as this is only incremented in the single-
 2140		 * threaded kernel __init code path; all other accesses are
 2141		 * read-only
2142 */
2143 }
2144
Weidong Hand9630fe2008-12-08 11:06:32 +08002145 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2146 GFP_KERNEL);
2147 if (!g_iommus) {
2148 printk(KERN_ERR "Allocating global iommu array failed\n");
2149 ret = -ENOMEM;
2150 goto error;
2151 }
2152
mark gross80b20dd2008-04-18 13:53:58 -07002153 deferred_flush = kzalloc(g_num_of_iommus *
2154 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2155 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002156 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08002157 ret = -ENOMEM;
2158 goto error;
2159 }
2160
mark gross5e0d2a62008-03-04 15:22:08 -08002161 for_each_drhd_unit(drhd) {
2162 if (drhd->ignored)
2163 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002164
2165 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002166 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002167
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002168 ret = iommu_init_domains(iommu);
2169 if (ret)
2170 goto error;
2171
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002172 /*
2173 * TBD:
2174 * we could share the same root & context tables
 2175		 * among all IOMMUs. Need to split it later.
2176 */
2177 ret = iommu_alloc_root_entry(iommu);
2178 if (ret) {
2179 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2180 goto error;
2181 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002182 if (!ecap_pass_through(iommu->ecap))
2183 pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002184 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002185 if (iommu_pass_through)
2186 if (!pass_through) {
2187 printk(KERN_INFO
2188 "Pass Through is not supported by hardware.\n");
2189 iommu_pass_through = 0;
2190 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002191
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002192 /*
2193 * Start from the sane iommu hardware state.
2194 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002195 for_each_drhd_unit(drhd) {
2196 if (drhd->ignored)
2197 continue;
2198
2199 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002200
2201 /*
2202 * If the queued invalidation is already initialized by us
2203 * (for example, while enabling interrupt-remapping) then
2204 * we got the things already rolling from a sane state.
2205 */
2206 if (iommu->qi)
2207 continue;
2208
2209 /*
2210 * Clear any previous faults.
2211 */
2212 dmar_fault(-1, iommu);
2213 /*
2214 * Disable queued invalidation if supported and already enabled
2215 * before OS handover.
2216 */
2217 dmar_disable_qi(iommu);
2218 }
2219
2220 for_each_drhd_unit(drhd) {
2221 if (drhd->ignored)
2222 continue;
2223
2224 iommu = drhd->iommu;
2225
Youquan Songa77b67d2008-10-16 16:31:56 -07002226 if (dmar_enable_qi(iommu)) {
2227 /*
2228 * Queued Invalidate not enabled, use Register Based
2229 * Invalidate
2230 */
2231 iommu->flush.flush_context = __iommu_flush_context;
2232 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2233 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002234 "invalidation\n",
2235 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002236 } else {
2237 iommu->flush.flush_context = qi_flush_context;
2238 iommu->flush.flush_iotlb = qi_flush_iotlb;
2239 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002240 "invalidation\n",
2241 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002242 }
2243 }
2244
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002245 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002246 * If pass through is set and enabled, context entries of all pci
 2247	 * devices are initialized with the pass through translation type.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002248 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002249 if (iommu_pass_through) {
2250 ret = init_context_pass_through();
2251 if (ret) {
2252 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2253 iommu_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002254 }
2255 }
2256
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002257 /*
 2258	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002259	 * identity mappings for rmrr, gfx, and isa, and use the static
 2260	 * identity mapping when iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002261 */
2262 if (!iommu_pass_through) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002263 if (iommu_identity_mapping)
2264 iommu_prepare_static_identity_mapping();
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002265 /*
2266 * For each rmrr
2267 * for each dev attached to rmrr
2268 * do
2269 * locate drhd for dev, alloc domain for dev
2270 * allocate free domain
2271 * allocate page table entries for rmrr
2272 * if context not allocated for bus
2273 * allocate and init context
2274 * set present in root table for this bus
2275 * init context with domain, translation etc
2276 * endfor
2277 * endfor
2278 */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002279 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002280 for_each_rmrr_units(rmrr) {
2281 for (i = 0; i < rmrr->devices_cnt; i++) {
2282 pdev = rmrr->devices[i];
2283 /*
 2284				 * some BIOSes list nonexistent devices in
 2285				 * the DMAR table.
2286 */
2287 if (!pdev)
2288 continue;
2289 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2290 if (ret)
2291 printk(KERN_ERR
2292 "IOMMU: mapping reserved region failed\n");
2293 }
2294 }
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002295
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002296 iommu_prepare_gfx_mapping();
2297
2298 iommu_prepare_isa();
2299 }
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002300
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002301 /*
2302 * for each drhd
2303 * enable fault log
2304 * global invalidate context cache
2305 * global invalidate iotlb
2306 * enable translation
2307 */
2308 for_each_drhd_unit(drhd) {
2309 if (drhd->ignored)
2310 continue;
2311 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002312
2313 iommu_flush_write_buffer(iommu);
2314
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002315 ret = dmar_set_interrupt(iommu);
2316 if (ret)
2317 goto error;
2318
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002319 iommu_set_root_entry(iommu);
2320
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002321 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002322 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002323 iommu_disable_protect_mem_regions(iommu);
2324
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002325 ret = iommu_enable_translation(iommu);
2326 if (ret)
2327 goto error;
2328 }
2329
2330 return 0;
2331error:
2332 for_each_drhd_unit(drhd) {
2333 if (drhd->ignored)
2334 continue;
2335 iommu = drhd->iommu;
2336 free_iommu(iommu);
2337 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002338 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002339 return ret;
2340}
2341
2342static inline u64 aligned_size(u64 host_addr, size_t size)
2343{
2344 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002345 addr = (host_addr & (~PAGE_MASK)) + size;
2346 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002347}
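/*
 * Worked example, not from the original source, assuming 4KiB pages:
 *
 *   aligned_size(0x1ffc, 8) = PAGE_ALIGN((0x1ffc & ~PAGE_MASK) + 8)
 *                           = PAGE_ALIGN(0xffc + 8)
 *                           = PAGE_ALIGN(0x1004) = 0x2000
 *
 * i.e. an 8-byte buffer straddling a page boundary needs two pages of
 * IOVA space.
 */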
2348
2349struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002350iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002351{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002352 struct iova *piova;
2353
2354 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002355 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002356 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002357 return NULL;
2358
2359 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002360 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002361 return piova;
2362}
2363
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002364static struct iova *
2365__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002366 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002367{
2368 struct pci_dev *pdev = to_pci_dev(dev);
2369 struct iova *iova = NULL;
2370
Yang Hongyang284901a2009-04-06 19:01:15 -07002371 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002372 iova = iommu_alloc_iova(domain, size, dma_mask);
2373 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002374 /*
2375 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002376 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002377 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002378 */
Yang Hongyang284901a2009-04-06 19:01:15 -07002379 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002380 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002381 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002382 }
2383
2384 if (!iova) {
 2385		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
2386 return NULL;
2387 }
2388
2389 return iova;
2390}
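/*
 * Illustrative sketch, not from the original source: a hypothetical helper
 * using __intel_alloc_iova() to grab one page of IOVA space, letting the
 * allocator try the 32-bit range before the full mask.
 */
static dma_addr_t __maybe_unused
example_alloc_page_iova(struct device *dev, struct dmar_domain *domain)
{
	struct iova *iova = __intel_alloc_iova(dev, domain, VTD_PAGE_SIZE,
					       DMA_BIT_MASK(64));

	/* pfn_lo counts pages; shift it back up to a bus address */
	return iova ? (dma_addr_t)iova->pfn_lo << PAGE_SHIFT : 0;
}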
2391
2392static struct dmar_domain *
2393get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002394{
2395 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002396 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002397
2398 domain = get_domain_for_dev(pdev,
2399 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2400 if (!domain) {
2401 printk(KERN_ERR
 2402			"Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002403 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002404 }
2405
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002406 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002407 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002408 ret = domain_context_mapping(domain, pdev,
2409 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002410 if (ret) {
2411 printk(KERN_ERR
 2412				"Domain context map for %s failed\n",
2413 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002414 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002415 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002416 }
2417
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002418 return domain;
2419}
2420
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002421static int iommu_dummy(struct pci_dev *pdev)
2422{
2423 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2424}
2425
2426/* Check if the pdev needs to go through non-identity map and unmap process.*/
2427static int iommu_no_mapping(struct pci_dev *pdev)
2428{
2429 int found;
2430
2431 if (!iommu_identity_mapping)
2432 return iommu_dummy(pdev);
2433
2434 found = identity_mapping(pdev);
2435 if (found) {
2436 if (pdev->dma_mask > DMA_BIT_MASK(32))
2437 return 1;
2438 else {
2439 /*
 2440			 * The 32-bit device is removed from si_domain and falls
 2441			 * back to non-identity mapping.
2442 */
2443 domain_remove_one_dev_info(si_domain, pdev);
2444 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2445 pci_name(pdev));
2446 return 0;
2447 }
2448 } else {
2449 /*
 2450		 * When a 64-bit DMA device is detached from a vm, the device
2451 * is put into si_domain for identity mapping.
2452 */
2453 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2454 int ret;
2455 ret = domain_add_dev_info(si_domain, pdev);
2456 if (!ret) {
2457 printk(KERN_INFO "64bit %s uses identity mapping\n",
2458 pci_name(pdev));
2459 return 1;
2460 }
2461 }
2462 }
2463
2464 return iommu_dummy(pdev);
2465}
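/*
 * Summary of the cases above (editorial, not from the original source):
 *
 *   in si_domain, dma_mask >  32 bits -> keep identity mapping, return 1
 *   in si_domain, dma_mask <= 32 bits -> drop out of si_domain, return 0
 *   not in si_domain, 64-bit capable  -> try to join si_domain, return 1
 *   everything else                   -> fall back to iommu_dummy(pdev)
 */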
2466
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002467static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2468 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002469{
2470 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002471 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002472 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002473 struct iova *iova;
2474 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002475 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002476 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002477
2478 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002479
2480 if (iommu_no_mapping(pdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002481 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002482
2483 domain = get_valid_domain_for_dev(pdev);
2484 if (!domain)
2485 return 0;
2486
Weidong Han8c11e792008-12-08 15:29:22 +08002487 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002488 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002489
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002490 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002491 if (!iova)
2492 goto error;
2493
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002494 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002495
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002496 /*
2497 * Check if DMAR supports zero-length reads on write only
 2498	 * mappings.
2499 */
2500 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002501 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002502 prot |= DMA_PTE_READ;
2503 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2504 prot |= DMA_PTE_WRITE;
2505 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002506	 * paddr..(paddr + size) might cover a partial page, so we map the whole
 2507	 * page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002508	 * might have two guest addresses mapping to the same host paddr, but this
 2509	 * is not a big problem
2510 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002511 ret = domain_page_mapping(domain, start_paddr,
David Woodhousefd18de52009-05-10 23:57:41 +01002512 ((u64)paddr) & PHYSICAL_PAGE_MASK,
2513 size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002514 if (ret)
2515 goto error;
2516
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002517 /* it's a non-present to present mapping. Only flush if caching mode */
2518 if (cap_caching_mode(iommu->cap))
2519 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2520 size >> VTD_PAGE_SHIFT);
2521 else
Weidong Han8c11e792008-12-08 15:29:22 +08002522 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002523
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002524 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002525
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002526error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002527 if (iova)
2528 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002529	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002530 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002531 return 0;
2532}
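/*
 * Illustrative sketch, not from the original source: how a streaming
 * mapping would reach __intel_map_single() directly.  "dev" and "buf" are
 * hypothetical; real drivers go through the generic DMA API instead.
 */
static dma_addr_t __maybe_unused
example_map_for_device(struct device *dev, void *buf, size_t len)
{
	/* map [buf, buf+len) for CPU-to-device transfers (DMA_TO_DEVICE) */
	return __intel_map_single(dev, virt_to_bus(buf), len,
				  DMA_TO_DEVICE, to_pci_dev(dev)->dma_mask);
}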
2533
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002534static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2535 unsigned long offset, size_t size,
2536 enum dma_data_direction dir,
2537 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002538{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002539 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2540 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002541}
2542
mark gross5e0d2a62008-03-04 15:22:08 -08002543static void flush_unmaps(void)
2544{
mark gross80b20dd2008-04-18 13:53:58 -07002545 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002546
mark gross5e0d2a62008-03-04 15:22:08 -08002547 timer_on = 0;
2548
2549 /* just flush them all */
2550 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002551 struct intel_iommu *iommu = g_iommus[i];
2552 if (!iommu)
2553 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002554
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002555 if (!deferred_flush[i].next)
2556 continue;
2557
2558 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002559 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002560 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002561 unsigned long mask;
2562 struct iova *iova = deferred_flush[i].iova[j];
2563
2564 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2565 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2566 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2567 iova->pfn_lo << PAGE_SHIFT, mask);
2568 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002569 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002570 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002571 }
2572
mark gross5e0d2a62008-03-04 15:22:08 -08002573 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002574}
2575
2576static void flush_unmaps_timeout(unsigned long data)
2577{
mark gross80b20dd2008-04-18 13:53:58 -07002578 unsigned long flags;
2579
2580 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002581 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002582 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002583}
2584
2585static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2586{
2587 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002588 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002589 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002590
2591 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002592 if (list_size == HIGH_WATER_MARK)
2593 flush_unmaps();
2594
Weidong Han8c11e792008-12-08 15:29:22 +08002595 iommu = domain_get_iommu(dom);
2596 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002597
mark gross80b20dd2008-04-18 13:53:58 -07002598 next = deferred_flush[iommu_id].next;
2599 deferred_flush[iommu_id].domain[next] = dom;
2600 deferred_flush[iommu_id].iova[next] = iova;
2601 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002602
2603 if (!timer_on) {
2604 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2605 timer_on = 1;
2606 }
2607 list_size++;
2608 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2609}
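/*
 * Editorial note, not from the original source: add_unmap() is the
 * batching half of the lazy-unmap scheme.  IOVAs are parked in the
 * per-iommu deferred_flush tables and released by flush_unmaps() either
 * when list_size hits HIGH_WATER_MARK or when the 10ms unmap_timer fires,
 * so N unmaps cost roughly one global IOTLB flush instead of N
 * page-selective ones.
 */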
2610
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002611static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2612 size_t size, enum dma_data_direction dir,
2613 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002614{
2615 struct pci_dev *pdev = to_pci_dev(dev);
2616 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002617 unsigned long start_addr;
2618 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002619 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002620
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002621 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002622 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002623
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002624 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002625 BUG_ON(!domain);
2626
Weidong Han8c11e792008-12-08 15:29:22 +08002627 iommu = domain_get_iommu(domain);
2628
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002629 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2630 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002631 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002632
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002633 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002634 size = aligned_size((u64)dev_addr, size);
2635
David Woodhouse4cf2e752009-02-11 17:23:43 +00002636 pr_debug("Device %s unmapping: %zx@%llx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002637 pci_name(pdev), size, (unsigned long long)start_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002638
2639 /* clear the whole page */
2640 dma_pte_clear_range(domain, start_addr, start_addr + size);
2641 /* free page tables */
2642 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
mark gross5e0d2a62008-03-04 15:22:08 -08002643 if (intel_iommu_strict) {
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002644 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2645 size >> VTD_PAGE_SHIFT);
mark gross5e0d2a62008-03-04 15:22:08 -08002646 /* free iova */
2647 __free_iova(&domain->iovad, iova);
2648 } else {
2649 add_unmap(domain, iova);
2650 /*
 2651		 * queue up the release of the unmap to save the roughly 1/6 of
 2652		 * the cpu time otherwise spent on the iotlb flush operation...
2653 */
mark gross5e0d2a62008-03-04 15:22:08 -08002654 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002655}
2656
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002657static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2658 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002659{
2660 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2661}
2662
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002663static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2664 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002665{
2666 void *vaddr;
2667 int order;
2668
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002669 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002670 order = get_order(size);
2671 flags &= ~(GFP_DMA | GFP_DMA32);
2672
2673 vaddr = (void *)__get_free_pages(flags, order);
2674 if (!vaddr)
2675 return NULL;
2676 memset(vaddr, 0, size);
2677
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002678 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2679 DMA_BIDIRECTIONAL,
2680 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002681 if (*dma_handle)
2682 return vaddr;
2683 free_pages((unsigned long)vaddr, order);
2684 return NULL;
2685}
2686
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002687static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2688 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002689{
2690 int order;
2691
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002692 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002693 order = get_order(size);
2694
2695 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2696 free_pages((unsigned long)vaddr, order);
2697}
2698
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002699static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2700 int nelems, enum dma_data_direction dir,
2701 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002702{
2703 int i;
2704 struct pci_dev *pdev = to_pci_dev(hwdev);
2705 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002706 unsigned long start_addr;
2707 struct iova *iova;
2708 size_t size = 0;
David Woodhouse4cf2e752009-02-11 17:23:43 +00002709 phys_addr_t addr;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002710 struct scatterlist *sg;
Weidong Han8c11e792008-12-08 15:29:22 +08002711 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002712
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002713 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002714 return;
2715
2716 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002717 BUG_ON(!domain);
2718
2719 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002720
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002721 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002722 if (!iova)
2723 return;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002724 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002725 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002726 size += aligned_size((u64)addr, sg->length);
2727 }
2728
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002729 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002730
2731 /* clear the whole page */
2732 dma_pte_clear_range(domain, start_addr, start_addr + size);
2733 /* free page tables */
2734 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2735
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002736 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2737 size >> VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002738
2739 /* free iova */
2740 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002741}
2742
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002743static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002744 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002745{
2746 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002747 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002748
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002749 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002750 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002751 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002752 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002753 }
2754 return nelems;
2755}
2756
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002757static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2758 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002759{
David Woodhouse4cf2e752009-02-11 17:23:43 +00002760 phys_addr_t addr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002761 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002762 struct pci_dev *pdev = to_pci_dev(hwdev);
2763 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002764 size_t size = 0;
2765 int prot = 0;
2766 size_t offset = 0;
2767 struct iova *iova = NULL;
2768 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002769 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002770 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002771 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002772
2773 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002774 if (iommu_no_mapping(pdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002775 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002776
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002777 domain = get_valid_domain_for_dev(pdev);
2778 if (!domain)
2779 return 0;
2780
Weidong Han8c11e792008-12-08 15:29:22 +08002781 iommu = domain_get_iommu(domain);
2782
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002783 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002784 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002785 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002786 }
2787
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002788 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002789 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002790 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002791 return 0;
2792 }
2793
2794 /*
2795 * Check if DMAR supports zero-length reads on write only
 2796	 * mappings.
2797 */
2798 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002799 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002800 prot |= DMA_PTE_READ;
2801 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2802 prot |= DMA_PTE_WRITE;
2803
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002804 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002805 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002806 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002807 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002808 size = aligned_size((u64)addr, sg->length);
2809 ret = domain_page_mapping(domain, start_addr + offset,
David Woodhousefd18de52009-05-10 23:57:41 +01002810 ((u64)addr) & PHYSICAL_PAGE_MASK,
2811 size, prot);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002812 if (ret) {
2813 /* clear the page */
2814 dma_pte_clear_range(domain, start_addr,
2815 start_addr + offset);
2816 /* free page tables */
2817 dma_pte_free_pagetable(domain, start_addr,
2818 start_addr + offset);
2819 /* free iova */
2820 __free_iova(&domain->iovad, iova);
2821 return 0;
2822 }
2823 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002824 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002825 sg->dma_length = sg->length;
2826 offset += size;
2827 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002828
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002829 /* it's a non-present to present mapping. Only flush if caching mode */
2830 if (cap_caching_mode(iommu->cap))
2831 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2832 offset >> VTD_PAGE_SHIFT);
2833 else
Weidong Han8c11e792008-12-08 15:29:22 +08002834 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002835
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002836 return nelems;
2837}
2838
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002839static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2840{
2841 return !dma_addr;
2842}
2843
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002844struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002845 .alloc_coherent = intel_alloc_coherent,
2846 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002847 .map_sg = intel_map_sg,
2848 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002849 .map_page = intel_map_page,
2850 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002851 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002852};
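/*
 * Illustrative sketch, not from the original source: drivers never call
 * intel_dma_ops directly; the generic DMA API dispatches to it.  "pdev",
 * "buf" and "len" are hypothetical placeholders.
 */
static dma_addr_t __maybe_unused
example_driver_dma(struct pci_dev *pdev, void *buf, size_t len)
{
	/* ends up in intel_map_page() via struct dma_map_ops */
	return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
}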
2853
2854static inline int iommu_domain_cache_init(void)
2855{
2856 int ret = 0;
2857
2858 iommu_domain_cache = kmem_cache_create("iommu_domain",
2859 sizeof(struct dmar_domain),
2860 0,
2861 SLAB_HWCACHE_ALIGN,
2862
2863 NULL);
2864 if (!iommu_domain_cache) {
2865 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2866 ret = -ENOMEM;
2867 }
2868
2869 return ret;
2870}
2871
2872static inline int iommu_devinfo_cache_init(void)
2873{
2874 int ret = 0;
2875
2876 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2877 sizeof(struct device_domain_info),
2878 0,
2879 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002880 NULL);
2881 if (!iommu_devinfo_cache) {
2882 printk(KERN_ERR "Couldn't create devinfo cache\n");
2883 ret = -ENOMEM;
2884 }
2885
2886 return ret;
2887}
2888
2889static inline int iommu_iova_cache_init(void)
2890{
2891 int ret = 0;
2892
2893 iommu_iova_cache = kmem_cache_create("iommu_iova",
2894 sizeof(struct iova),
2895 0,
2896 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002897 NULL);
2898 if (!iommu_iova_cache) {
2899 printk(KERN_ERR "Couldn't create iova cache\n");
2900 ret = -ENOMEM;
2901 }
2902
2903 return ret;
2904}
2905
2906static int __init iommu_init_mempool(void)
2907{
2908 int ret;
2909 ret = iommu_iova_cache_init();
2910 if (ret)
2911 return ret;
2912
2913 ret = iommu_domain_cache_init();
2914 if (ret)
2915 goto domain_error;
2916
2917 ret = iommu_devinfo_cache_init();
2918 if (!ret)
2919 return ret;
2920
2921 kmem_cache_destroy(iommu_domain_cache);
2922domain_error:
2923 kmem_cache_destroy(iommu_iova_cache);
2924
2925 return -ENOMEM;
2926}
2927
2928static void __init iommu_exit_mempool(void)
2929{
2930 kmem_cache_destroy(iommu_devinfo_cache);
2931 kmem_cache_destroy(iommu_domain_cache);
2932 kmem_cache_destroy(iommu_iova_cache);
2933
2934}
2935
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
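/*
 * Re-enable queued invalidation and translation on every active IOMMU,
 * used when bringing the hardware back up on resume.
 */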
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

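/* Globally invalidate the context-cache and IOTLB of every active IOMMU. */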
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

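/*
 * Flush everything, disable translation, and save the fault-event
 * registers of each IOMMU so that iommu_resume() can restore them.
 */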
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

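/*
 * Re-initialize the hardware, then restore the fault-event registers
 * saved by iommu_suspend() and free the save areas.
 */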
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

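/*
 * Main entry point: parse the DMAR table and device scopes, set up the
 * mempools and reserved ranges, initialize the DMAR units, then install
 * intel_dma_ops (unless running in pass-through mode) and register with
 * the generic IOMMU API.
 */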
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

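/*
 * A device behind a PCIe-to-PCI bridge is context-mapped through its
 * upstream bridges as well, so those context entries must be torn down
 * together with the device's own.
 */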
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

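/*
 * Detach pdev from domain; if it was the last device of this domain on
 * its IOMMU, drop that IOMMU from the domain's bitmap and recompute the
 * domain's iommu count and capabilities.
 */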
static void domain_remove_one_dev_info(struct dmar_domain *domain,
					  struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * and update the iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machines; it is never set in a context entry */
static unsigned long vm_domid;

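/*
 * A domain that spans several IOMMUs can only address what the IOMMU
 * with the smallest AGAW can address.
 */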
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

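/*
 * Finish setting up a domain allocated for the generic IOMMU API:
 * reserve special IOVA ranges, derive the AGAW from the requested guest
 * width, and allocate the top-level page directory.
 */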
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

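/*
 * Callbacks implementing the generic IOMMU API (linux/iommu.h),
 * registered below as intel_iommu_ops and used by KVM device assignment
 * among others.  A consumer would do roughly (sketch only; see the
 * iommu API for the exact calling convention):
 *
 *	domain = iommu_domain_alloc();
 *	iommu_attach_device(domain, &pdev->dev);
 *	iommu_map_range(domain, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 */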
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

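/*
 * Map [iova, iova + size) to hpa with the requested protection bits,
 * after checking that the smallest AGAW among the domain's IOMMUs can
 * still address the new end of the mapping.
 */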
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map_range,
	.unmap = intel_iommu_unmap_range,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);