/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE	VTD_PAGE_SIZE
#define CONTEXT_SIZE	VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)	((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)	((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

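/*
 * Worked example (illustrative, not part of the driver): on x86 with 4KiB
 * MM pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions above
 * are the identity. On an assumed configuration with 64KiB MM pages, one
 * MM pfn would cover 16 VT-d pfns:
 *
 *	mm_to_dma_pfn(3)  == 3 << (16 - 12) == 48
 *	dma_to_mm_pfn(48) == 48 >> 4        == 3
 */
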
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

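/*
 * Illustrative sketch (not part of the driver): a root entry is built by
 * pointing it at a freshly allocated context table and marking it present,
 * much as device_to_context_entry() does below:
 *
 *	struct root_entry *root = &iommu->root_entry[bus];
 *	void *ctx = alloc_pgtable_page();		// 4KiB, zeroed
 *
 *	set_root_value(root, virt_to_phys(ctx));	// bits 12-63
 *	set_root_present(root);				// bit 0
 *	// get_context_addr_from_root(root) now returns ctx
 */
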
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval (available to software)
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

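/*
 * Illustrative sketch (not part of the driver): domain_context_mapping_one()
 * below composes a multi-level context entry roughly like this:
 *
 *	struct context_entry *ce = device_to_context_entry(iommu, bus, devfn);
 *
 *	context_set_domain_id(ce, domain->id);
 *	context_set_address_root(ce, virt_to_phys(domain->pgd));
 *	context_set_address_width(ce, iommu->agaw);
 *	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(ce);
 *	context_set_present(ce);	// last: the entry is now live
 */
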
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

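/*
 * Illustrative sketch (not part of the driver; pfn_host is a placeholder
 * name): a leaf PTE mapping an IOVA to host page pfn_host, readable and
 * writable, would be built with the helpers above as:
 *
 *	struct dma_pte *pte = addr_to_dma_pte(domain, iova);
 *
 *	dma_set_pte_pfn(pte, pfn_host);		// bits 12-63
 *	dma_set_pte_readable(pte);		// bit 0
 *	dma_set_pte_writable(pte);		// bit 1
 *	// dma_pte_present(pte) is now true
 */
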
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping;	/* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered intel_iommus; sizes the per-domain iommu bitmaps */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

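/*
 * Illustrative usage (not part of the driver): the options above are
 * comma-separated on the kernel command line, so e.g.
 *
 *	intel_iommu=on,strict		// enable, unbatched IOTLB flushes
 *	intel_iommu=on,igfx_off		// enable, but leave graphics unmapped
 *
 * would be parsed by intel_iommu_setup() one token at a time.
 */
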
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

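/*
 * Worked example (illustrative, not part of the driver): with
 * DEFAULT_DOMAIN_ADDRESS_WIDTH == 48, width_to_agaw(48) == (48 - 30) / 9
 * == 2, i.e. a 4-level page table (agaw_to_level(2) == 4). If the
 * hardware's SAGAW field lacks bit 2 but has bit 1, the loop above
 * settles on agaw 1: a 39-bit, 3-level table.
 */
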
/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE	(9)
#define LEVEL_MASK	(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

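/*
 * Worked example (illustrative, not part of the driver): each level covers
 * LEVEL_STRIDE == 9 bits of pfn, so for level 2:
 *
 *	level_to_offset_bits(2) == 9
 *	pfn_level_offset(0x12345, 2) == (0x12345 >> 9) & 0x1ff == 0x91
 *	level_size(2) == 512 pfns (a 2MiB region with 4KiB VT-d pages)
 *	align_to_level(0x12345, 2) == 0x12400
 */
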
static struct dma_pte *addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);
	BUG_ON(addr >> addr_width);
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(addr >> VTD_PAGE_SHIFT, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table control read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_pfn_level_pte(domain, pfn, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level ptes; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here; nobody else touches the iova range */
	while (start_pfn <= last_pfn) {
		dma_pte_clear_one(domain, start_pfn);
		start_pfn++;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* Only clear this pte/pmd if we're asked to clear its
		   _whole_ range */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		while (tmp <= last_pfn) {
			pte = dma_pfn_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* the value read back determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* the value read back determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably only needed to be extra-safe; it looks like we
	 * can ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

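/*
 * Illustrative sketch (not part of the driver): a page-selective (PSI)
 * flush of 8 pages starting at the (hypothetical) page-aligned IOVA
 * 0x100000 for domain 5 would be issued as
 *
 *	__iommu_flush_iotlb(iommu, 5, 0x100000, 3, DMA_TLB_PSI_FLUSH);
 *
 * so val_iva carries the address in its upper bits and the size order
 * (log2 of the page count, here 3) in its low bits.
 */
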
static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires the page count to be 2 ^ x, with the base address
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

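/*
 * Worked example (illustrative, not part of the driver): for pages == 9,
 * __roundup_pow_of_two(9) == 16 and mask == ilog2(16) == 4, so the PSI
 * flush above covers 16 pages; hardware requires the power-of-two size
 * and matching alignment, hence the round-up.
 */
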
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


1177static void domain_exit(struct dmar_domain *domain);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001178static void vm_domain_exit(struct dmar_domain *domain);
Suresh Siddhae61d98d2008-07-10 11:16:35 -07001179
1180void free_dmar_iommu(struct intel_iommu *iommu)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001181{
1182 struct dmar_domain *domain;
1183 int i;
Weidong Hanc7151a82008-12-08 22:51:37 +08001184 unsigned long flags;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001185
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001186 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1187 for (; i < cap_ndoms(iommu->cap); ) {
1188 domain = iommu->domains[i];
1189 clear_bit(i, iommu->domain_ids);
Weidong Hanc7151a82008-12-08 22:51:37 +08001190
1191 spin_lock_irqsave(&domain->iommu_lock, flags);
Weidong Han5e98c4b2008-12-08 23:03:27 +08001192 if (--domain->iommu_count == 0) {
1193 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1194 vm_domain_exit(domain);
1195 else
1196 domain_exit(domain);
1197 }
Weidong Hanc7151a82008-12-08 22:51:37 +08001198 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1199
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001200 i = find_next_bit(iommu->domain_ids,
1201 cap_ndoms(iommu->cap), i+1);
1202 }
1203
1204 if (iommu->gcmd & DMA_GCMD_TE)
1205 iommu_disable_translation(iommu);
1206
1207 if (iommu->irq) {
1208 set_irq_data(iommu->irq, NULL);
1209 /* This will mask the irq */
1210 free_irq(iommu->irq, iommu);
1211 destroy_irq(iommu->irq);
1212 }
1213
1214 kfree(iommu->domains);
1215 kfree(iommu->domain_ids);
1216
Weidong Hand9630fe2008-12-08 11:06:32 +08001217 g_iommus[iommu->seq_id] = NULL;
1218
1219 /* if all iommus are freed, free g_iommus */
1220 for (i = 0; i < g_num_of_iommus; i++) {
1221 if (g_iommus[i])
1222 break;
1223 }
1224
1225 if (i == g_num_of_iommus)
1226 kfree(g_iommus);
1227
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001228 /* free context mapping */
1229 free_context_table(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001230}
1231
static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
			  &reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PHYSICAL_PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

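/*
 * Worked example (illustrative, not part of the driver): the adjusted
 * width rounds gaw up so that (width - 12) is a multiple of the 9-bit
 * level stride:
 *
 *	guestwidth_to_adjustwidth(48) == 48	// (48-12) % 9 == 0
 *	guestwidth_to_adjustwidth(36) == 39	// (36-12) % 9 == 6 -> +3
 */
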
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001440static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1441 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442{
1443 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001445 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001446 struct dma_pte *pgd;
1447 unsigned long num;
1448 unsigned long ndomains;
1449 int id;
1450 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001451 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001452
1453 pr_debug("Set context mapping for %02x:%02x.%d\n",
1454 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001455
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001456 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001457 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1458 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001459
David Woodhouse276dbf992009-04-04 01:45:37 +01001460 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001461 if (!iommu)
1462 return -ENODEV;
1463
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001464 context = device_to_context_entry(iommu, bus, devfn);
1465 if (!context)
1466 return -ENOMEM;
1467 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001468 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001469 spin_unlock_irqrestore(&iommu->lock, flags);
1470 return 0;
1471 }
1472
Weidong Hanea6606b2008-12-08 23:08:15 +08001473 id = domain->id;
1474 pgd = domain->pgd;
1475
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001476 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1477 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001478 int found = 0;
1479
1480 /* find an available domain id for this device in iommu */
1481 ndomains = cap_ndoms(iommu->cap);
1482 num = find_first_bit(iommu->domain_ids, ndomains);
1483 for (; num < ndomains; ) {
1484 if (iommu->domains[num] == domain) {
1485 id = num;
1486 found = 1;
1487 break;
1488 }
1489 num = find_next_bit(iommu->domain_ids,
1490 cap_ndoms(iommu->cap), num+1);
1491 }
1492
1493 if (found == 0) {
1494 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1495 if (num >= ndomains) {
1496 spin_unlock_irqrestore(&iommu->lock, flags);
1497 printk(KERN_ERR "IOMMU: no free domain ids\n");
1498 return -EFAULT;
1499 }
1500
1501 set_bit(num, iommu->domain_ids);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001502 set_bit(iommu->seq_id, &domain->iommu_bmp);
Weidong Hanea6606b2008-12-08 23:08:15 +08001503 iommu->domains[num] = domain;
1504 id = num;
1505 }
1506
1507 /* Skip top levels of page tables for
1508		 * an iommu which has a smaller agaw than the default.
1509 */
1510 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1511 pgd = phys_to_virt(dma_pte_addr(pgd));
1512 if (!dma_pte_present(pgd)) {
1513 spin_unlock_irqrestore(&iommu->lock, flags);
1514 return -ENOMEM;
1515 }
1516 }
1517 }
1518
1519 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001520
Yu Zhao93a23a72009-05-18 13:51:37 +08001521 if (translation != CONTEXT_TT_PASS_THROUGH) {
1522 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1523 translation = info ? CONTEXT_TT_DEV_IOTLB :
1524 CONTEXT_TT_MULTI_LEVEL;
1525 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001526 /*
1527 * In pass through mode, AW must be programmed to indicate the largest
1528	 * AGAW value supported by hardware; ASR is ignored by hardware.
1529 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001530 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001531 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001532 else {
1533 context_set_address_root(context, virt_to_phys(pgd));
1534 context_set_address_width(context, iommu->agaw);
1535 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001536
1537 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001538 context_set_fault_enable(context);
1539 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001540 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001541
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001542 /*
1543	 * It's a non-present to present mapping. If the hardware doesn't cache
1544	 * non-present entries we only need to flush the write-buffer. If it
1545	 * _does_ cache non-present entries, then it does so in the special
1546 * domain #0, which we have to flush:
1547 */
1548 if (cap_caching_mode(iommu->cap)) {
1549 iommu->flush.flush_context(iommu, 0,
1550 (((u16)bus) << 8) | devfn,
1551 DMA_CCMD_MASK_NOBIT,
1552 DMA_CCMD_DEVICE_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001553 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001554 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001556 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001557 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001559
1560 spin_lock_irqsave(&domain->iommu_lock, flags);
1561 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1562 domain->iommu_count++;
Sheng Yang58c610b2009-03-18 15:33:05 +08001563 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001564 }
1565 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001566 return 0;
1567}
1568
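/*
 * Map the context entry for @pdev itself and then for every bridge on the
 * path to the root, since a request from a device behind a PCIe-to-PCI
 * bridge can arrive tagged with the bridge's source id and must still hit
 * a valid context entry.
 */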
1569static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001570domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1571 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001572{
1573 int ret;
1574 struct pci_dev *tmp, *parent;
1575
David Woodhouse276dbf992009-04-04 01:45:37 +01001576 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001577 pdev->bus->number, pdev->devfn,
1578 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001579 if (ret)
1580 return ret;
1581
1582 /* dependent device mapping */
1583 tmp = pci_find_upstream_pcie_bridge(pdev);
1584 if (!tmp)
1585 return 0;
1586 /* Secondary interface's bus number and devfn 0 */
1587 parent = pdev->bus->self;
1588 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001589 ret = domain_context_mapping_one(domain,
1590 pci_domain_nr(parent->bus),
1591 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001592 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593 if (ret)
1594 return ret;
1595 parent = parent->bus->self;
1596 }
1597 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1598 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001599 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001600 tmp->subordinate->number, 0,
1601 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001602 else /* this is a legacy PCI bridge */
1603 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001604 pci_domain_nr(tmp->bus),
1605 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001606 tmp->devfn,
1607 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001608}
1609
Weidong Han5331fe62008-12-08 23:00:00 +08001610static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001611{
1612 int ret;
1613 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001614 struct intel_iommu *iommu;
1615
David Woodhouse276dbf992009-04-04 01:45:37 +01001616 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1617 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001618 if (!iommu)
1619 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001620
David Woodhouse276dbf992009-04-04 01:45:37 +01001621 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001622 if (!ret)
1623 return ret;
1624 /* dependent device mapping */
1625 tmp = pci_find_upstream_pcie_bridge(pdev);
1626 if (!tmp)
1627 return ret;
1628 /* Secondary interface's bus number and devfn 0 */
1629 parent = pdev->bus->self;
1630 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001631 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001632 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001633 if (!ret)
1634 return ret;
1635 parent = parent->bus->self;
1636 }
1637 if (tmp->is_pcie)
David Woodhouse276dbf992009-04-04 01:45:37 +01001638 return device_context_mapped(iommu, tmp->subordinate->number,
1639 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001641 return device_context_mapped(iommu, tmp->bus->number,
1642 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001643}
1644
1645static int
1646domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1647 u64 hpa, size_t size, int prot)
1648{
1649 u64 start_pfn, end_pfn;
1650 struct dma_pte *pte;
1651 int index;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001652 int addr_width = agaw_to_width(domain->agaw);
1653
David Woodhouse66eae842009-06-27 19:00:32 +01001654 BUG_ON(hpa >> addr_width);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001655
1656 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1657 return -EINVAL;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001658 iova &= PAGE_MASK;
1659 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1660 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001661 index = 0;
1662 while (start_pfn < end_pfn) {
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001663 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001664 if (!pte)
1665 return -ENOMEM;
1666			/* We don't need a lock here; nobody else
1667 * touches the iova range
1668 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001669 BUG_ON(dma_pte_addr(pte));
David Woodhousedd4e8312009-06-27 16:21:20 +01001670 dma_set_pte_pfn(pte, start_pfn);
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001671 dma_set_pte_prot(pte, prot);
Sheng Yang9cf06692009-03-18 15:33:07 +08001672 if (prot & DMA_PTE_SNP)
1673 dma_set_pte_snp(pte);
Weidong Han5331fe62008-12-08 23:00:00 +08001674 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001675 start_pfn++;
1676 index++;
1677 }
1678 return 0;
1679}
1680
Weidong Hanc7151a82008-12-08 22:51:37 +08001681static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001682{
Weidong Hanc7151a82008-12-08 22:51:37 +08001683 if (!iommu)
1684 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001685
1686 clear_context_table(iommu, bus, devfn);
1687 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001688 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001689 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001690}
1691
1692static void domain_remove_dev_info(struct dmar_domain *domain)
1693{
1694 struct device_domain_info *info;
1695 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001696 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697
1698 spin_lock_irqsave(&device_domain_lock, flags);
1699 while (!list_empty(&domain->devices)) {
1700 info = list_entry(domain->devices.next,
1701 struct device_domain_info, link);
1702 list_del(&info->link);
1703 list_del(&info->global);
1704 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001705 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001706 spin_unlock_irqrestore(&device_domain_lock, flags);
1707
Yu Zhao93a23a72009-05-18 13:51:37 +08001708 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001709 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001710 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001711 free_devinfo_mem(info);
1712
1713 spin_lock_irqsave(&device_domain_lock, flags);
1714 }
1715 spin_unlock_irqrestore(&device_domain_lock, flags);
1716}
1717
1718/*
1719 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001720 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001721 */
Kay, Allen M38717942008-09-09 18:37:29 +03001722static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001723find_domain(struct pci_dev *pdev)
1724{
1725 struct device_domain_info *info;
1726
1727 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001728 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729 if (info)
1730 return info->domain;
1731 return NULL;
1732}
1733
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001734/* Find (or allocate and initialize) the domain for a device.  Devices
 * behind the same PCIe-to-PCI bridge must share one domain, found via the
 * upstream bridge's (bus, devfn). */
1735static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1736{
1737 struct dmar_domain *domain, *found = NULL;
1738 struct intel_iommu *iommu;
1739 struct dmar_drhd_unit *drhd;
1740 struct device_domain_info *info, *tmp;
1741 struct pci_dev *dev_tmp;
1742 unsigned long flags;
1743 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001744 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001745 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001746
1747 domain = find_domain(pdev);
1748 if (domain)
1749 return domain;
1750
David Woodhouse276dbf992009-04-04 01:45:37 +01001751 segment = pci_domain_nr(pdev->bus);
1752
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001753 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1754 if (dev_tmp) {
1755 if (dev_tmp->is_pcie) {
1756 bus = dev_tmp->subordinate->number;
1757 devfn = 0;
1758 } else {
1759 bus = dev_tmp->bus->number;
1760 devfn = dev_tmp->devfn;
1761 }
1762 spin_lock_irqsave(&device_domain_lock, flags);
1763 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001764 if (info->segment == segment &&
1765 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001766 found = info->domain;
1767 break;
1768 }
1769 }
1770 spin_unlock_irqrestore(&device_domain_lock, flags);
1771		/* the pcie-pci bridge already has a domain, use it */
1772 if (found) {
1773 domain = found;
1774 goto found_domain;
1775 }
1776 }
1777
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001778 domain = alloc_domain();
1779 if (!domain)
1780 goto error;
1781
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001782 /* Allocate new domain for the device */
1783 drhd = dmar_find_matched_drhd_unit(pdev);
1784 if (!drhd) {
1785 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1786 pci_name(pdev));
		free_domain_mem(domain);	/* don't leak the freshly allocated domain */
1787		return NULL;
1788 }
1789 iommu = drhd->iommu;
1790
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001791 ret = iommu_attach_domain(domain, iommu);
1792 if (ret) {
1793 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001794 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001795 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001796
1797 if (domain_init(domain, gaw)) {
1798 domain_exit(domain);
1799 goto error;
1800 }
1801
1802 /* register pcie-to-pci device */
1803 if (dev_tmp) {
1804 info = alloc_devinfo_mem();
1805 if (!info) {
1806 domain_exit(domain);
1807 goto error;
1808 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001809 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001810 info->bus = bus;
1811 info->devfn = devfn;
1812 info->dev = NULL;
1813 info->domain = domain;
1814 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001815 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001816
1817		/* the pcie-to-pci bridge already has a domain, use it */
1818 found = NULL;
1819 spin_lock_irqsave(&device_domain_lock, flags);
1820 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001821 if (tmp->segment == segment &&
1822 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001823 found = tmp->domain;
1824 break;
1825 }
1826 }
1827 if (found) {
1828 free_devinfo_mem(info);
1829 domain_exit(domain);
1830 domain = found;
1831 } else {
1832 list_add(&info->link, &domain->devices);
1833 list_add(&info->global, &device_domain_list);
1834 }
1835 spin_unlock_irqrestore(&device_domain_lock, flags);
1836 }
1837
1838found_domain:
1839 info = alloc_devinfo_mem();
1840 if (!info)
1841 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001842 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001843 info->bus = pdev->bus->number;
1844 info->devfn = pdev->devfn;
1845 info->dev = pdev;
1846 info->domain = domain;
1847 spin_lock_irqsave(&device_domain_lock, flags);
1848 /* somebody is fast */
1849 found = find_domain(pdev);
1850 if (found != NULL) {
1851 spin_unlock_irqrestore(&device_domain_lock, flags);
1852 if (found != domain) {
1853 domain_exit(domain);
1854 domain = found;
1855 }
1856 free_devinfo_mem(info);
1857 return domain;
1858 }
1859 list_add(&info->link, &domain->devices);
1860 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001861 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001862 spin_unlock_irqrestore(&device_domain_lock, flags);
1863 return domain;
1864error:
1865 /* recheck it here, maybe others set it */
1866 return find_domain(pdev);
1867}
1868
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001869static int iommu_identity_mapping;
1870
David Woodhouseb2132032009-06-26 18:50:28 +01001871static int iommu_domain_identity_map(struct dmar_domain *domain,
1872 unsigned long long start,
1873 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001875 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001876 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001877
1878 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001879 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001880 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001881 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001882 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1883 IOVA_PFN(base + size) - 1)) {
1884 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01001885 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001886 }
1887
David Woodhouseb2132032009-06-26 18:50:28 +01001888 pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
1889 size, base, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001890 /*
1891	 * The RMRR range might overlap a physical memory range;
1892 * clear it first
1893 */
David Woodhouse595badf2009-06-27 22:09:11 +01001894 dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
1895 (base + size - 1) >> VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001896
David Woodhouseb2132032009-06-26 18:50:28 +01001897 return domain_page_mapping(domain, base, base, size,
1898 DMA_PTE_READ|DMA_PTE_WRITE);
1899}
1900
1901static int iommu_prepare_identity_map(struct pci_dev *pdev,
1902 unsigned long long start,
1903 unsigned long long end)
1904{
1905 struct dmar_domain *domain;
1906 int ret;
1907
1908 printk(KERN_INFO
1909 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1910 pci_name(pdev), start, end);
1911
David Woodhousec7ab48d2009-06-26 19:10:36 +01001912 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01001913 if (!domain)
1914 return -ENOMEM;
1915
1916 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001917 if (ret)
1918 goto error;
1919
1920 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001921 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01001922 if (ret)
1923 goto error;
1924
1925 return 0;
1926
1927 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928 domain_exit(domain);
1929 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001930}
1931
1932static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1933 struct pci_dev *pdev)
1934{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001935 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001936 return 0;
1937 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1938 rmrr->end_address + 1);
1939}
1940
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001941#ifdef CONFIG_DMAR_FLOPPY_WA
1942static inline void iommu_prepare_isa(void)
1943{
1944 struct pci_dev *pdev;
1945 int ret;
1946
1947 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1948 if (!pdev)
1949 return;
1950
David Woodhousec7ab48d2009-06-26 19:10:36 +01001951 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001952 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1953
1954 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01001955 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1956 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001957
1958}
1959#else
1960static inline void iommu_prepare_isa(void)
1961{
1962 return;
1963}
1964#endif /* !CONFIG_DMAR_FLOPPY_WA */
1965
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001966/* Initialize each context entry as pass through. */
1967static int __init init_context_pass_through(void)
1968{
1969 struct pci_dev *pdev = NULL;
1970 struct dmar_domain *domain;
1971 int ret;
1972
1973 for_each_pci_dev(pdev) {
1974 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1975 ret = domain_context_mapping(domain, pdev,
1976 CONTEXT_TT_PASS_THROUGH);
1977 if (ret)
1978 return ret;
1979 }
1980 return 0;
1981}
1982
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001983static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01001984
1985static int __init si_domain_work_fn(unsigned long start_pfn,
1986 unsigned long end_pfn, void *datax)
1987{
1988 int *ret = datax;
1989
1990 *ret = iommu_domain_identity_map(si_domain,
1991 (uint64_t)start_pfn << PAGE_SHIFT,
1992 (uint64_t)end_pfn << PAGE_SHIFT);
1993 return *ret;
1994
1995}
1996
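/*
 * Build the static identity (si) domain: a single domain, attached to every
 * iommu, whose page tables map each active memory region 1:1 so that a DMA
 * address equals the host physical address.  work_with_active_regions()
 * feeds si_domain_work_fn() one pfn range at a time to construct that map.
 */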
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001997static int si_domain_init(void)
1998{
1999 struct dmar_drhd_unit *drhd;
2000 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002001 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002002
2003 si_domain = alloc_domain();
2004 if (!si_domain)
2005 return -EFAULT;
2006
David Woodhousec7ab48d2009-06-26 19:10:36 +01002007 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002008
2009 for_each_active_iommu(iommu, drhd) {
2010 ret = iommu_attach_domain(si_domain, iommu);
2011 if (ret) {
2012 domain_exit(si_domain);
2013 return -EFAULT;
2014 }
2015 }
2016
2017 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2018 domain_exit(si_domain);
2019 return -EFAULT;
2020 }
2021
2022 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2023
David Woodhousec7ab48d2009-06-26 19:10:36 +01002024 for_each_online_node(nid) {
2025 work_with_active_regions(nid, si_domain_work_fn, &ret);
2026 if (ret)
2027 return ret;
2028 }
2029
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002030 return 0;
2031}
2032
2033static void domain_remove_one_dev_info(struct dmar_domain *domain,
2034 struct pci_dev *pdev);
2035static int identity_mapping(struct pci_dev *pdev)
2036{
2037 struct device_domain_info *info;
2038
2039 if (likely(!iommu_identity_mapping))
2040 return 0;
2041
2042
2043 list_for_each_entry(info, &si_domain->devices, link)
2044 if (info->dev == pdev)
2045 return 1;
2046 return 0;
2047}
2048
2049static int domain_add_dev_info(struct dmar_domain *domain,
2050 struct pci_dev *pdev)
2051{
2052 struct device_domain_info *info;
2053 unsigned long flags;
2054
2055 info = alloc_devinfo_mem();
2056 if (!info)
2057 return -ENOMEM;
2058
2059 info->segment = pci_domain_nr(pdev->bus);
2060 info->bus = pdev->bus->number;
2061 info->devfn = pdev->devfn;
2062 info->dev = pdev;
2063 info->domain = domain;
2064
2065 spin_lock_irqsave(&device_domain_lock, flags);
2066 list_add(&info->link, &domain->devices);
2067 list_add(&info->global, &device_domain_list);
2068 pdev->dev.archdata.iommu = info;
2069 spin_unlock_irqrestore(&device_domain_lock, flags);
2070
2071 return 0;
2072}
2073
2074static int iommu_prepare_static_identity_mapping(void)
2075{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002076 struct pci_dev *pdev = NULL;
2077 int ret;
2078
2079 ret = si_domain_init();
2080 if (ret)
2081 return -EFAULT;
2082
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002083 for_each_pci_dev(pdev) {
David Woodhousec7ab48d2009-06-26 19:10:36 +01002084 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2085 pci_name(pdev));
2086
2087 ret = domain_context_mapping(si_domain, pdev,
2088 CONTEXT_TT_MULTI_LEVEL);
2089 if (ret)
2090 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002091 ret = domain_add_dev_info(si_domain, pdev);
2092 if (ret)
2093 return ret;
2094 }
2095
2096 return 0;
2097}
2098
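/*
 * One-time bring-up of the remapping hardware, in order: count the iommus
 * and allocate the global tracking arrays, allocate per-iommu domain-id
 * tables and root entries, reset the invalidation machinery to a sane
 * state, populate context entries (pass through, static identity, RMRR
 * and ISA mappings, as configured), then install the root entries and
 * enable translation.
 */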
2099int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002100{
2101 struct dmar_drhd_unit *drhd;
2102 struct dmar_rmrr_unit *rmrr;
2103 struct pci_dev *pdev;
2104 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002105 int i, ret;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002106 int pass_through = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002107
2108 /*
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002109	 * In case pass through cannot be enabled, the iommu tries to use identity
2110 * mapping.
2111 */
2112 if (iommu_pass_through)
2113 iommu_identity_mapping = 1;
2114
2115 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002116 * for each drhd
2117 * allocate root
2118 * initialize and program root entry to not present
2119 * endfor
2120 */
2121 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002122 g_num_of_iommus++;
2123 /*
2124 * lock not needed as this is only incremented in the single
2125		 * threaded kernel __init code path; all other accesses are
2126		 * read only
2127 */
2128 }
2129
Weidong Hand9630fe2008-12-08 11:06:32 +08002130 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2131 GFP_KERNEL);
2132 if (!g_iommus) {
2133 printk(KERN_ERR "Allocating global iommu array failed\n");
2134 ret = -ENOMEM;
2135 goto error;
2136 }
2137
mark gross80b20dd2008-04-18 13:53:58 -07002138 deferred_flush = kzalloc(g_num_of_iommus *
2139 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2140 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002141 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08002142 ret = -ENOMEM;
2143 goto error;
2144 }
2145
mark gross5e0d2a62008-03-04 15:22:08 -08002146 for_each_drhd_unit(drhd) {
2147 if (drhd->ignored)
2148 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002149
2150 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002151 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002152
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002153 ret = iommu_init_domains(iommu);
2154 if (ret)
2155 goto error;
2156
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002157 /*
2158 * TBD:
2159 * we could share the same root & context tables
2160	 * among all IOMMUs. Need to split it later.
2161 */
2162 ret = iommu_alloc_root_entry(iommu);
2163 if (ret) {
2164 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2165 goto error;
2166 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002167 if (!ecap_pass_through(iommu->ecap))
2168 pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002169 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002170 if (iommu_pass_through)
2171 if (!pass_through) {
2172 printk(KERN_INFO
2173 "Pass Through is not supported by hardware.\n");
2174 iommu_pass_through = 0;
2175 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002176
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002177 /*
2178 * Start from the sane iommu hardware state.
2179 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002180 for_each_drhd_unit(drhd) {
2181 if (drhd->ignored)
2182 continue;
2183
2184 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002185
2186 /*
2187 * If the queued invalidation is already initialized by us
2188 * (for example, while enabling interrupt-remapping) then
2189 * we got the things already rolling from a sane state.
2190 */
2191 if (iommu->qi)
2192 continue;
2193
2194 /*
2195 * Clear any previous faults.
2196 */
2197 dmar_fault(-1, iommu);
2198 /*
2199 * Disable queued invalidation if supported and already enabled
2200 * before OS handover.
2201 */
2202 dmar_disable_qi(iommu);
2203 }
2204
2205 for_each_drhd_unit(drhd) {
2206 if (drhd->ignored)
2207 continue;
2208
2209 iommu = drhd->iommu;
2210
Youquan Songa77b67d2008-10-16 16:31:56 -07002211 if (dmar_enable_qi(iommu)) {
2212 /*
2213 * Queued Invalidate not enabled, use Register Based
2214 * Invalidate
2215 */
2216 iommu->flush.flush_context = __iommu_flush_context;
2217 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2218 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002219 "invalidation\n",
2220 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002221 } else {
2222 iommu->flush.flush_context = qi_flush_context;
2223 iommu->flush.flush_iotlb = qi_flush_iotlb;
2224 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002225 "invalidation\n",
2226 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002227 }
2228 }
2229
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002230 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002231 * If pass through is set and enabled, context entries of all pci
2232	 * devices are initialized with the pass through translation type.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002233 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002234 if (iommu_pass_through) {
2235 ret = init_context_pass_through();
2236 if (ret) {
2237 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2238 iommu_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002239 }
2240 }
2241
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002242 /*
2243	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002244 * identity mappings for rmrr, gfx, and isa and may fall back to static
2245 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002246 */
2247 if (!iommu_pass_through) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002248 if (iommu_identity_mapping)
2249 iommu_prepare_static_identity_mapping();
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002250 /*
2251 * For each rmrr
2252 * for each dev attached to rmrr
2253 * do
2254 * locate drhd for dev, alloc domain for dev
2255 * allocate free domain
2256 * allocate page table entries for rmrr
2257 * if context not allocated for bus
2258 * allocate and init context
2259 * set present in root table for this bus
2260 * init context with domain, translation etc
2261 * endfor
2262 * endfor
2263 */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002264 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002265 for_each_rmrr_units(rmrr) {
2266 for (i = 0; i < rmrr->devices_cnt; i++) {
2267 pdev = rmrr->devices[i];
2268 /*
2269				 * some BIOSes list non-existent devices in
2270				 * the DMAR table.
2271 */
2272 if (!pdev)
2273 continue;
2274 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2275 if (ret)
2276 printk(KERN_ERR
2277 "IOMMU: mapping reserved region failed\n");
2278 }
2279 }
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002280
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002281 iommu_prepare_isa();
2282 }
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002283
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002284 /*
2285 * for each drhd
2286 * enable fault log
2287 * global invalidate context cache
2288 * global invalidate iotlb
2289 * enable translation
2290 */
2291 for_each_drhd_unit(drhd) {
2292 if (drhd->ignored)
2293 continue;
2294 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002295
2296 iommu_flush_write_buffer(iommu);
2297
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002298 ret = dmar_set_interrupt(iommu);
2299 if (ret)
2300 goto error;
2301
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002302 iommu_set_root_entry(iommu);
2303
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002304 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002305 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002306 iommu_disable_protect_mem_regions(iommu);
2307
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002308 ret = iommu_enable_translation(iommu);
2309 if (ret)
2310 goto error;
2311 }
2312
2313 return 0;
2314error:
2315 for_each_drhd_unit(drhd) {
2316 if (drhd->ignored)
2317 continue;
2318 iommu = drhd->iommu;
2319 free_iommu(iommu);
2320 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002321 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002322 return ret;
2323}
2324
2325static inline u64 aligned_size(u64 host_addr, size_t size)
2326{
2327 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002328 addr = (host_addr & (~PAGE_MASK)) + size;
2329 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002330}
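/*
 * Worked example, assuming 4KiB pages: aligned_size(0x1ffc, 8) adds the
 * in-page offset 0xffc to the length, giving 0x1004, which PAGE_ALIGN
 * rounds up to 0x2000; an 8-byte buffer that straddles a page boundary
 * still costs two pages of IOVA space.
 */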
2331
2332struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002333iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002334{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002335 struct iova *piova;
2336
2337 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002338 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002339 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002340 return NULL;
2341
2342 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002343 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002344 return piova;
2345}
2346
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002347static struct iova *
2348__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002349 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002350{
2351 struct pci_dev *pdev = to_pci_dev(dev);
2352 struct iova *iova = NULL;
2353
Yang Hongyang284901a2009-04-06 19:01:15 -07002354 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002355 iova = iommu_alloc_iova(domain, size, dma_mask);
2356 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002357 /*
2358 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002359 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002360		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002361 */
Yang Hongyang284901a2009-04-06 19:01:15 -07002362 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002363 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002364 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002365 }
2366
2367 if (!iova) {
2368		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
2369 return NULL;
2370 }
2371
2372 return iova;
2373}
2374
2375static struct dmar_domain *
2376get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002377{
2378 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002379 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002380
2381 domain = get_domain_for_dev(pdev,
2382 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2383 if (!domain) {
2384 printk(KERN_ERR
2385		       "Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002386 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002387 }
2388
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002389 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002390 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002391 ret = domain_context_mapping(domain, pdev,
2392 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002393 if (ret) {
2394 printk(KERN_ERR
2395			       "Domain context map for %s failed\n",
2396 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002397 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002398 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399 }
2400
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002401 return domain;
2402}
2403
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002404static int iommu_dummy(struct pci_dev *pdev)
2405{
2406 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2407}
2408
2409/* Check if the pdev needs to go through non-identity map and unmap process.*/
2410static int iommu_no_mapping(struct pci_dev *pdev)
2411{
2412 int found;
2413
2414 if (!iommu_identity_mapping)
2415 return iommu_dummy(pdev);
2416
2417 found = identity_mapping(pdev);
2418 if (found) {
2419 if (pdev->dma_mask > DMA_BIT_MASK(32))
2420 return 1;
2421 else {
2422 /*
2423			 * 32 bit DMA devices are removed from si_domain and fall back
2424 * to non-identity mapping.
2425 */
2426 domain_remove_one_dev_info(si_domain, pdev);
2427 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2428 pci_name(pdev));
2429 return 0;
2430 }
2431 } else {
2432 /*
2433		 * When a 64 bit DMA device is detached from a vm, the device
2434 * is put into si_domain for identity mapping.
2435 */
2436 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2437 int ret;
2438 ret = domain_add_dev_info(si_domain, pdev);
2439 if (!ret) {
2440 printk(KERN_INFO "64bit %s uses identity mapping\n",
2441 pci_name(pdev));
2442 return 1;
2443 }
2444 }
2445 }
2446
2447 return iommu_dummy(pdev);
2448}
2449
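/*
 * Core of the map_page path: reserve a page-aligned IOVA range sized for
 * the buffer, wire it to the physical pages with domain_page_mapping(),
 * flush the IOTLB (caching mode) or just the write buffer, and return the
 * bus address plus the original in-page offset.
 */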
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002450static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2451 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002452{
2453 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002454 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002455 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002456 struct iova *iova;
2457 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002458 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002459 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002460
2461 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002462
2463 if (iommu_no_mapping(pdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002464 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002465
2466 domain = get_valid_domain_for_dev(pdev);
2467 if (!domain)
2468 return 0;
2469
Weidong Han8c11e792008-12-08 15:29:22 +08002470 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002471 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002472
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002473 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002474 if (!iova)
2475 goto error;
2476
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002477 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002478
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002479 /*
2480 * Check if DMAR supports zero-length reads on write only
2481 * mappings..
2482	 * mappings.
2483 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002484 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002485 prot |= DMA_PTE_READ;
2486 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2487 prot |= DMA_PTE_WRITE;
2488 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002489	 * paddr to (paddr + size) might be a partial page; we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002490	 * page. Note: if two parts of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002491	 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002492 * is not a big problem
2493 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002494 ret = domain_page_mapping(domain, start_paddr,
David Woodhousefd18de52009-05-10 23:57:41 +01002495 ((u64)paddr) & PHYSICAL_PAGE_MASK,
2496 size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002497 if (ret)
2498 goto error;
2499
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002500 /* it's a non-present to present mapping. Only flush if caching mode */
2501 if (cap_caching_mode(iommu->cap))
2502 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2503 size >> VTD_PAGE_SHIFT);
2504 else
Weidong Han8c11e792008-12-08 15:29:22 +08002505 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002506
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002507 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002508
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002509error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002510 if (iova)
2511 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002512	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002513 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002514 return 0;
2515}
2516
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002517static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2518 unsigned long offset, size_t size,
2519 enum dma_data_direction dir,
2520 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002521{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002522 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2523 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002524}
2525
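/*
 * Deferred unmap handling: rather than paying for an IOTLB flush on every
 * unmap, freed IOVAs are parked per-iommu in deferred_flush[] and released
 * in batches, either when HIGH_WATER_MARK entries have queued up or when
 * the 10ms unmap_timer fires.  The trade-off is a short window in which a
 * stale translation can still reach the freed IOVA range.
 */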
mark gross5e0d2a62008-03-04 15:22:08 -08002526static void flush_unmaps(void)
2527{
mark gross80b20dd2008-04-18 13:53:58 -07002528 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002529
mark gross5e0d2a62008-03-04 15:22:08 -08002530 timer_on = 0;
2531
2532 /* just flush them all */
2533 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002534 struct intel_iommu *iommu = g_iommus[i];
2535 if (!iommu)
2536 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002537
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002538 if (!deferred_flush[i].next)
2539 continue;
2540
2541 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002542 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002543 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002544 unsigned long mask;
2545 struct iova *iova = deferred_flush[i].iova[j];
2546
2547 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2548 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2549 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2550 iova->pfn_lo << PAGE_SHIFT, mask);
2551 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002552 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002553 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002554 }
2555
mark gross5e0d2a62008-03-04 15:22:08 -08002556 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002557}
2558
2559static void flush_unmaps_timeout(unsigned long data)
2560{
mark gross80b20dd2008-04-18 13:53:58 -07002561 unsigned long flags;
2562
2563 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002564 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002565 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002566}
2567
2568static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2569{
2570 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002571 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002572 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002573
2574 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002575 if (list_size == HIGH_WATER_MARK)
2576 flush_unmaps();
2577
Weidong Han8c11e792008-12-08 15:29:22 +08002578 iommu = domain_get_iommu(dom);
2579 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002580
mark gross80b20dd2008-04-18 13:53:58 -07002581 next = deferred_flush[iommu_id].next;
2582 deferred_flush[iommu_id].domain[next] = dom;
2583 deferred_flush[iommu_id].iova[next] = iova;
2584 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002585
2586 if (!timer_on) {
2587 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2588 timer_on = 1;
2589 }
2590 list_size++;
2591 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2592}
2593
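/*
 * Reverse of intel_map_page(): find the IOVA covering dev_addr, clear its
 * ptes and free any now-empty page table pages, then either flush the
 * IOTLB synchronously (intel_iommu_strict) or queue the IOVA for the
 * batched deferred flush above.
 */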
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002594static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2595 size_t size, enum dma_data_direction dir,
2596 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002597{
2598 struct pci_dev *pdev = to_pci_dev(dev);
2599 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002600 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002601 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002602 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002603
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002604 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002605 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002606
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002607 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002608 BUG_ON(!domain);
2609
Weidong Han8c11e792008-12-08 15:29:22 +08002610 iommu = domain_get_iommu(domain);
2611
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002612 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2613 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002614 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002615
David Woodhoused794dc92009-06-28 00:27:49 +01002616 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2617 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002618
David Woodhoused794dc92009-06-28 00:27:49 +01002619 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2620 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002621
2622 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002623 dma_pte_clear_range(domain, start_pfn, last_pfn);
2624
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002625 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002626 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2627
mark gross5e0d2a62008-03-04 15:22:08 -08002628 if (intel_iommu_strict) {
David Woodhoused794dc92009-06-28 00:27:49 +01002629 iommu_flush_iotlb_psi(iommu, domain->id,
2630 start_pfn << VTD_PAGE_SHIFT,
2631 last_pfn - start_pfn + 1);
mark gross5e0d2a62008-03-04 15:22:08 -08002632 /* free iova */
2633 __free_iova(&domain->iovad, iova);
2634 } else {
2635 add_unmap(domain, iova);
2636 /*
2637 * queue up the release of the unmap to save the 1/6th of the
2638 * cpu used up by the iotlb flush operation...
2639 */
mark gross5e0d2a62008-03-04 15:22:08 -08002640 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002641}
2642
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002643static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2644 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002645{
2646 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2647}
2648
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002649static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2650 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002651{
2652 void *vaddr;
2653 int order;
2654
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002655 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656 order = get_order(size);
2657 flags &= ~(GFP_DMA | GFP_DMA32);
2658
2659 vaddr = (void *)__get_free_pages(flags, order);
2660 if (!vaddr)
2661 return NULL;
2662 memset(vaddr, 0, size);
2663
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002664 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2665 DMA_BIDIRECTIONAL,
2666 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002667 if (*dma_handle)
2668 return vaddr;
2669 free_pages((unsigned long)vaddr, order);
2670 return NULL;
2671}
2672
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002673static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2674 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002675{
2676 int order;
2677
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002678 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002679 order = get_order(size);
2680
2681 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2682 free_pages((unsigned long)vaddr, order);
2683}
2684
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002685static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2686 int nelems, enum dma_data_direction dir,
2687 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002688{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002689 struct pci_dev *pdev = to_pci_dev(hwdev);
2690 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002691 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002692 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002693 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002694
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002695 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002696 return;
2697
2698 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002699 BUG_ON(!domain);
2700
2701 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002702
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002703 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002704 if (!iova)
2705 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002706
David Woodhoused794dc92009-06-28 00:27:49 +01002707 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2708 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002709
2710 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002711 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002712
David Woodhoused794dc92009-06-28 00:27:49 +01002713 /* free page tables */
2714 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2715
2716 iommu_flush_iotlb_psi(iommu, domain->id,
2717 start_pfn << VTD_PAGE_SHIFT,
2718 (last_pfn - start_pfn + 1));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002719
2720 /* free iova */
2721 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002722}
2723
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002724static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002725 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002726{
2727 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002728 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002729
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002730 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002731 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002732 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002733 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002734 }
2735 return nelems;
2736}
2737
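/*
 * Scatter-gather mapping: one IOVA allocation is sized for the whole list,
 * then each segment is mapped at its running offset so the device sees a
 * single contiguous bus-address range; on failure the partial mapping is
 * torn down and 0 is returned, as the DMA API expects of map_sg.
 */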
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002738static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2739 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002740{
David Woodhouse4cf2e752009-02-11 17:23:43 +00002741 phys_addr_t addr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002742 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002743 struct pci_dev *pdev = to_pci_dev(hwdev);
2744 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002745 size_t size = 0;
2746 int prot = 0;
2747 size_t offset = 0;
2748 struct iova *iova = NULL;
2749 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002750 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002751 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002752 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002753
2754 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755 if (iommu_no_mapping(pdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002756 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002757
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002758 domain = get_valid_domain_for_dev(pdev);
2759 if (!domain)
2760 return 0;
2761
Weidong Han8c11e792008-12-08 15:29:22 +08002762 iommu = domain_get_iommu(domain);
2763
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002764 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002765 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002766 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002767 }
2768
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002769 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002770 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002771 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002772 return 0;
2773 }
2774
2775 /*
2776 * Check if DMAR supports zero-length reads on write only
2777 * mappings..
2778	 * mappings.
2779 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002780 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002781 prot |= DMA_PTE_READ;
2782 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2783 prot |= DMA_PTE_WRITE;
2784
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002785 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002786 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002787 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002788 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002789 size = aligned_size((u64)addr, sg->length);
2790 ret = domain_page_mapping(domain, start_addr + offset,
David Woodhousefd18de52009-05-10 23:57:41 +01002791 ((u64)addr) & PHYSICAL_PAGE_MASK,
2792 size, prot);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002793 if (ret) {
2794 /* clear the page */
David Woodhouse595badf2009-06-27 22:09:11 +01002795 dma_pte_clear_range(domain,
2796 start_addr >> VTD_PAGE_SHIFT,
2797 (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002798 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002799 dma_pte_free_pagetable(domain, start_addr >> VTD_PAGE_SHIFT,
2800 (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002801 /* free iova */
2802 __free_iova(&domain->iovad, iova);
2803 return 0;
2804 }
2805 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002806 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002807 sg->dma_length = sg->length;
2808 offset += size;
2809 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002810
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002811 /* it's a non-present to present mapping. Only flush if caching mode */
2812 if (cap_caching_mode(iommu->cap))
2813 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2814 offset >> VTD_PAGE_SHIFT);
2815 else
Weidong Han8c11e792008-12-08 15:29:22 +08002816 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002817
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002818 return nelems;
2819}
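
/*
 * Worked example of the layout intel_map_sg() produces: for a 3-entry
 * scatterlist of 0x1000, 0x2000 and 0x3000 bytes, one 0x6000-byte IOVA
 * region is allocated and each entry's dma_address is assigned
 * back-to-back within it, so the device sees a single contiguous DMA
 * range even though the underlying physical pages are scattered.
 */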
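/*
 * A DMA address of zero is never handed out by the map routines above --
 * they return 0 on failure -- so a zero cookie is the error indicator.
 */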
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
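
/*
 * Illustrative sketch, not compiled into the driver: once intel_iommu_init()
 * installs intel_dma_ops as the global dma_ops, an ordinary driver's calls
 * into the DMA API are dispatched through this table -- dma_map_single()
 * lands in intel_map_page(), dma_map_sg() in intel_map_sg(), and so on.
 * The device and buffer below are made-up placeholders.
 */
#if 0
static int example_dma_usage(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* Allocates an IOVA, sets up VT-d page tables, returns a bus address. */
	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))	/* -> intel_mapping_error() */
		return -EIO;

	/* ... point the device's DMA engine at 'dma' and run the transfer ... */

	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
#endif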

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore the DMAR unit if no PCI devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass the IOMMU if it serves only gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

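/*
 * Suspend: save each IOMMU's fault-event control/data/address registers
 * after disabling DMA remapping, so that iommu_resume() can restore them
 * once init_iommu_hw() has re-enabled translation.
 */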
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

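/*
 * Entry point for DMA-remapping setup: parse the DMAR table, decide
 * whether remapping should be used at all, set up the slab caches and
 * reserved IOVA ranges, bring up the hardware via init_dmars(), and
 * finally install intel_dma_ops and register with the generic IOMMU API.
 */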
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * The initialization above is also used by interrupt remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

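/*
 * When a device sits behind a PCIe-to-PCI(-X) bridge, context entries are
 * also programmed for the bridge and for every PCIe switch port on the
 * path to it, so all of those must be torn down along with the device.
 */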
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
					 tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp
		 * and update the iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

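/*
 * Return the smallest adjusted guest address width (AGAW) supported by
 * any IOMMU this domain is attached to; mappings must fit within the
 * most restrictive unit's page-table depth.
 */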
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

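/*
 * Initialize a domain created on behalf of the generic IOMMU layer
 * (e.g. for device assignment): guest_width caps the domain's address
 * space, and the AGAW derived from it sets the page-table depth; the
 * default 48-bit width corresponds to a 4-level page table.
 */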
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base >> VTD_PAGE_SHIFT,
			    (base + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map_range,
	.unmap = intel_iommu_unmap_range,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};
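
/*
 * Illustrative sketch, not compiled into the driver: how a consumer such
 * as device assignment would drive the intel_iommu_ops registered above
 * through the generic IOMMU API of this era.  The IOVA, physical address
 * and size below are made-up placeholder values.
 */
#if 0
static int example_assign_device(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc();		/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* Map one page at IOVA 0 to a (made-up) physical page. */
	ret = iommu_map_range(domain, 0, 0x100000, PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	WARN_ON(iommu_iova_to_phys(domain, 0) != 0x100000);

	iommu_unmap_range(domain, 0, PAGE_SIZE);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);		/* -> intel_iommu_domain_destroy() */
	return ret;
}
#endif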

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);