/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw - VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}
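
/*
 * Illustrative note (not in the original source): with 4KiB MM pages and
 * 4KiB VT-d pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12) the shift above is
 * zero and the two pfn spaces coincide; on a 64KiB-page kernel one MM pfn
 * spans 16 VT-d pfns, so mm_to_dma_pfn(1) == 16.
 */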

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return root_present(root) ?
                (struct context_entry *)phys_to_virt(root->val & VTD_PAGE_MASK) :
                NULL;
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}

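/*
 * Clarifying note (added): the bit masks in the helpers above follow the
 * layout documented before struct context_entry.  context_set_fault_enable()
 * clears bit 1 (fault processing disable) while preserving all other bits,
 * and context_set_translation_type() clears bits 2-3 before writing the new
 * type into them.
 */
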
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
        pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
        return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
        pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
        int     id;                     /* domain id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        spinlock_t      mapping_lock;   /* page table lock */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct pci_dev *dev;    /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                               "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                               "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                               "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

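/*
 * Usage note (added for clarity): the options above are comma-separated on
 * the kernel command line, e.g. "intel_iommu=on,strict" or
 * "intel_iommu=igfx_off,forcedac"; the strcspn() loop is what steps from
 * one option to the next.
 */
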
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}
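
/*
 * Note (added): the PF_MEMALLOC dance above temporarily marks the task as a
 * memory allocator so the GFP_ATOMIC allocation may dip into reserves, then
 * restores the caller's original flag: (~PF_MEMALLOC | flags) is all-ones
 * when the flag was already set and ~PF_MEMALLOC when it was clear.
 */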

static inline void *alloc_pgtable_page(void)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu for a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        int i;

        domain->iommu_coherency = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
        int i;

        domain->iommu_snooping = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                        break;
                }
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        int i;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                if (segment != drhd->segment)
                        continue;

                for (i = 0; i < drhd->devices_cnt; i++) {
                        if (drhd->devices[i] &&
                            drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
                        if (drhd->devices[i] &&
                            drhd->devices[i]->subordinate &&
                            drhd->devices[i]->subordinate->number <= bus &&
                            drhd->devices[i]->subordinate->subordinate >= bus)
                                return drhd->iommu;
                }

                if (drhd->include_all)
                        return drhd->iommu;
        }

        return NULL;
}
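
/*
 * Note (added): the 'subordinate' test above matches devices that sit behind
 * a bridge listed in the DRHD device scope -- any bus number inside the
 * bridge's [secondary, subordinate] bus window is claimed by that iommu.
 */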

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
                                                     u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                context = (struct context_entry *)alloc_pgtable_page();
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                ret = 0;
                goto out;
        }
        ret = context_present(&context[devfn]);
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn],
                                    sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        struct root_entry *root;
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry)
                goto out;

        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
        return (width - 30) / LEVEL_STRIDE;
}
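
/*
 * Worked example (added): a 39-bit address width gives
 * agaw = (39 - 30) / 9 = 1, which agaw_to_level() turns into a 3-level
 * page table; agaw_to_width(1) == 39 round-trips.
 */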

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;
        unsigned long flags;

        BUG_ON(!domain->pgd);
        BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
        parent = domain->pgd;

        spin_lock_irqsave(&domain->mapping_lock, flags);
        while (level > 0) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (level == 1)
                        break;

                if (!dma_pte_present(pte)) {
                        tmp_page = alloc_pgtable_page();

                        if (!tmp_page) {
                                spin_unlock_irqrestore(&domain->mapping_lock,
                                                       flags);
                                return NULL;
                        }
                        domain_flush_cache(domain, tmp_page, PAGE_SIZE);
                        dma_set_pte_pfn(pte, virt_to_dma_pfn(tmp_page));
                        /*
                         * high level table always sets r/w, last level page
                         * table control read/write
                         */
                        dma_set_pte_readable(pte);
                        dma_set_pte_writable(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        spin_unlock_irqrestore(&domain->mapping_lock, flags);
        return pte;
}
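
/*
 * Note (added): pfn_to_dma_pte() walks from the top level down to the leaf,
 * allocating any missing intermediate tables on the way, so on success it
 * always returns a valid last-level PTE slot for 'pfn'.  Lookups that must
 * not allocate use dma_pfn_level_pte() below instead.
 */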

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte))
                        break;
                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
{
        struct dma_pte *pte = NULL;

        /* get last level pte */
        pte = dma_pfn_level_pte(domain, pfn, 1);

        if (pte) {
                dma_clear_pte(pte);
                domain_flush_cache(domain, pte, sizeof(*pte));
        }
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

        /* we don't need lock here; nobody else touches the iova range */
        while (start_pfn <= last_pfn) {
                dma_pte_clear_one(domain, start_pfn);
                start_pfn++;
        }
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *pte;
        int total = agaw_to_level(domain->agaw);
        int level;
        unsigned long tmp;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

        /* we don't need lock here, nobody else touches the iova range */
        level = 2;
        while (level <= total) {
                tmp = align_to_level(start_pfn, level);

                /* Only clear this pte/pmd if we're asked to clear its
                   _whole_ range */
                if (tmp + level_size(level) - 1 > last_pfn)
                        return;

                while (tmp <= last_pfn) {
                        pte = dma_pfn_level_pte(domain, tmp, level);
                        if (pte) {
                                free_pgtable_page(
                                        phys_to_virt(dma_pte_addr(pte)));
                                dma_clear_pte(pte);
                                domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                        tmp += level_size(level);
                }
                level++;
        }
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page();
        if (!root)
                return -ENOMEM;

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        void *addr;
        u32 sts;
        unsigned long flag;

        addr = iommu->root_entry;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* issue a context-cache invalidation and wait for the hardware to complete it */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* issue an IOTLB invalidation and wait for the hardware to complete it */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need to set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* Note: always flush non-leaf currently */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably meant to be extra safe; it looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                      dmar_readq, (!(val & DMA_TLB_IVT)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                         (unsigned long long)DMA_TLB_IIRG(type),
                         (unsigned long long)DMA_TLB_IAIG(val));
}
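
/*
 * Note (added, describing the register write above): for a page-selective
 * flush the address and size share a single IVA register write;
 * 'size_order' occupies the low address-mask bits and asks the hardware to
 * invalidate 2^size_order pages starting at the naturally aligned 'addr'.
 */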

static struct device_domain_info *iommu_support_dev_iotlb(
        struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
        int found = 0;
        unsigned long flags;
        struct device_domain_info *info;
        struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
                        found = 1;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev)
                return NULL;

        if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(info->dev))
                return NULL;

        info->iommu = iommu;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info)
                return;

        pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !pci_ats_enabled(info->dev))
                return;

        pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (!info->dev || !pci_ats_enabled(info->dev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(info->dev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  u64 addr, unsigned int pages)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));

        BUG_ON(addr & (~VTD_PAGE_MASK));
        BUG_ON(pages == 0);

        /*
         * Fall back to domain-selective flush if there is no PSI support or
         * the size is too big.
         * PSI requires the page count to be 2 ^ x, and the base address to be
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, domain ID 0 is reserved for non-present to present
         * mapping flush. Device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || did)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
        return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware complete it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return 0;
}
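
/*
 * Note (added): IOMMU_WAIT_OP() polls the named status register until the
 * given condition holds, so by the time enable/disable above returns the
 * hardware has acknowledged the DMA_GCMD_TE change.
 */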

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("Number of Domains supported <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                printk(KERN_ERR "Allocating domain id array failed\n");
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                                 GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
                kfree(iommu->domain_ids);
                return -ENOMEM;
        }

        spin_lock_init(&iommu->lock);

        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domainid 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;
        unsigned long flags;

        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);

                spin_lock_irqsave(&domain->iommu_lock, flags);
                if (--domain->iommu_count == 0) {
                        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                vm_domain_exit(domain);
                        else
                                domain_exit(domain);
                }
                spin_unlock_irqrestore(&domain->iommu_lock, flags);

                i = find_next_bit(iommu->domain_ids,
                                  cap_ndoms(iommu->cap), i+1);
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

        if (iommu->irq) {
                set_irq_data(iommu->irq, NULL);
                /* This will mask the irq */
                free_irq(iommu->irq, iommu);
                destroy_irq(iommu->irq);
        }

        kfree(iommu->domains);
        kfree(iommu->domain_ids);

        g_iommus[iommu->seq_id] = NULL;

        /* if all iommus are freed, free g_iommus */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (g_iommus[i])
                        break;
        }

        if (i == g_num_of_iommus)
                kfree(g_iommus);

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;

        return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;
        unsigned long flags;

        ndomains = cap_ndoms(iommu->cap);

        spin_lock_irqsave(&iommu->lock, flags);

        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num >= ndomains) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                printk(KERN_ERR "IOMMU: no free domain ids\n");
                return -ENOMEM;
        }

        domain->id = num;
        set_bit(num, iommu->domain_ids);
        set_bit(iommu->seq_id, &domain->iommu_bmp);
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;
        int found = 0;

        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
        num = find_first_bit(iommu->domain_ids, ndomains);
        for (; num < ndomains; ) {
                if (iommu->domains[num] == domain) {
                        found = 1;
                        break;
                }
                num = find_next_bit(iommu->domain_ids,
                                    cap_ndoms(iommu->cap), num+1);
        }

        if (found) {
                clear_bit(num, iommu->domain_ids);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                iommu->domains[num] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;
        u64 addr, size;

        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
                          &reserved_alloc_key);
        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova)
                printk(KERN_ERR "Reserve IOAPIC range failed\n");

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        addr = r->start;
                        addr &= PHYSICAL_PAGE_MASK;
                        size = r->end - addr;
                        size = PAGE_ALIGN(size);
                        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
                                            IOVA_PFN(size + addr) - 1);
                        if (!iova)
                                printk(KERN_ERR "Reserve iova failed\n");
                }
        }
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}
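
/*
 * Worked example (added): gaw 48 gives r = (48 - 12) % 9 = 0, so the width
 * is kept; an odd width such as gaw 35 gives r = 5 and is rounded up to 39,
 * the next width the page-table levels can express.
 */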
1364
1365static int domain_init(struct dmar_domain *domain, int guest_width)
1366{
1367 struct intel_iommu *iommu;
1368 int adjust_width, agaw;
1369 unsigned long sagaw;
1370
David Millerf6611972008-02-06 01:36:23 -08001371 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001372 spin_lock_init(&domain->mapping_lock);
Weidong Hanc7151a82008-12-08 22:51:37 +08001373 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001374
1375 domain_reserve_special_ranges(domain);
1376
1377 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001378 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001379 if (guest_width > cap_mgaw(iommu->cap))
1380 guest_width = cap_mgaw(iommu->cap);
1381 domain->gaw = guest_width;
1382 adjust_width = guestwidth_to_adjustwidth(guest_width);
1383 agaw = width_to_agaw(adjust_width);
1384 sagaw = cap_sagaw(iommu->cap);
1385 if (!test_bit(agaw, &sagaw)) {
1386 /* hardware doesn't support it, choose a bigger one */
1387 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1388 agaw = find_next_bit(&sagaw, 5, agaw);
1389 if (agaw >= 5)
1390 return -ENODEV;
1391 }
1392 domain->agaw = agaw;
1393 INIT_LIST_HEAD(&domain->devices);
1394
Weidong Han8e6040972008-12-08 15:49:06 +08001395 if (ecap_coherent(iommu->ecap))
1396 domain->iommu_coherency = 1;
1397 else
1398 domain->iommu_coherency = 0;
1399
Sheng Yang58c610b2009-03-18 15:33:05 +08001400 if (ecap_sc_support(iommu->ecap))
1401 domain->iommu_snooping = 1;
1402 else
1403 domain->iommu_snooping = 0;
1404
Weidong Hanc7151a82008-12-08 22:51:37 +08001405 domain->iommu_count = 1;
1406
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001407 /* always allocate the top pgd */
1408 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1409 if (!domain->pgd)
1410 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001411 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001412 return 0;
1413}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_bit(iommu->domain_ids, ndomains);
		for (; num < ndomains; ) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
			num = find_next_bit(iommu->domain_ids,
					    cap_ndoms(iommu->cap), num+1);
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			set_bit(iommu->seq_id, &domain->iommu_bmp);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip top levels of page tables for
		 * iommu which has less agaw than default.
		 */
		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
			pgd = phys_to_virt(dma_pte_addr(pgd));
			if (!dma_pte_present(pgd)) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}
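
/*
 * Illustrative sketch (not part of this driver): the per-iommu domain-id
 * lookup in domain_context_mapping_one() is a linear scan over a bitmap
 * of live ids, falling back to allocating the first free one. A
 * hypothetical stand-alone form of the same idea:
 */
static inline int example_find_or_alloc_id(unsigned long *ids,
					   struct dmar_domain **slots,
					   unsigned long nids,
					   struct dmar_domain *domain)
{
	unsigned long num;

	for (num = 0; num < nids; num++)
		if (test_bit(num, ids) && slots[num] == domain)
			return num;	/* domain already has an id here */

	num = find_first_zero_bit(ids, nids);
	if (num >= nids)
		return -1;		/* id space exhausted */
	set_bit(num, ids);
	slots[num] = domain;
	return num;
}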

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}
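
/*
 * Illustrative sketch (not part of this driver): the context-cache
 * flush in domain_context_mapping_one() addresses a device by its
 * 16-bit source-id, bus number in the high byte and devfn in the low
 * byte, which is also why the bridge walk above must map every
 * (bus, devfn) alias a request may carry. Hypothetical helper:
 */
static inline u16 example_source_id(u8 bus, u8 devfn)
{
	return ((u16)bus << 8) | devfn;	/* e.g. 02:01.3 -> 0x020b */
}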

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (tmp->is_pcie)
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			      unsigned long phys_pfn, unsigned long nr_pages,
			      int prot)
{
	struct dma_pte *pte;
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	while (nr_pages--) {
		pte = pfn_to_dma_pte(domain, iov_pfn);
		if (!pte)
			return -ENOMEM;
		/* We don't need a lock here; nobody else
		 * touches this iova range
		 */
		BUG_ON(dma_pte_addr(pte));
		dma_set_pte_pfn(pte, phys_pfn);
		dma_set_pte_prot(pte, prot);
		if (prot & DMA_PTE_SNP)
			dma_set_pte_snp(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
		iov_pfn++;
		phys_pfn++;
	}
	return 0;
}

static int domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
			       u64 hpa, size_t size, int prot)
{
	unsigned long first_pfn = hpa >> VTD_PAGE_SHIFT;
	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;

	return domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT, first_pfn,
				  last_pfn - first_pfn + 1, prot);
}
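
/*
 * Illustrative sketch (not part of this driver): the byte-range to
 * page-frame-range conversion used by domain_page_mapping() above. An
 * unaligned (hpa, size) pair still covers every page frame it touches:
 */
static inline unsigned long example_nr_pages(u64 hpa, size_t size)
{
	unsigned long first = hpa >> VTD_PAGE_SHIFT;
	unsigned long last = (hpa + size - 1) >> VTD_PAGE_SHIFT;

	/* e.g. 2 bytes starting at 0x1fff span page frames 0x1 and 0x2 */
	return last - first + 1;
}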

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (dev_tmp->is_pcie) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
			pci_name(pdev));
		free_domain_mem(domain);	/* don't leak the fresh domain */
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		domain_exit(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

static int iommu_identity_mapping;

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long size;
	unsigned long long base;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %lx@%llx for domain %d\n",
		 size, base, domain->id);
	/*
	 * RMRR ranges might overlap the physical memory range;
	 * clear them first
	 */
	dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
			    (base + size - 1) >> VTD_PAGE_SHIFT);

	return domain_pfn_mapping(domain, base >> VTD_PAGE_SHIFT,
				  base >> VTD_PAGE_SHIFT,
				  size >> VTD_PAGE_SHIFT,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
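
/*
 * Illustrative sketch (not part of this driver): the rounding done at
 * the top of iommu_domain_identity_map(). An arbitrary [start, end)
 * byte range is widened to whole pages before the IOVA reservation:
 */
static inline u64 example_aligned_span(u64 start, u64 end)
{
	u64 base = start & PAGE_MASK;	/* round the start down */

	return PAGE_ALIGN(end - base);	/* round the length up */
}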

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
		rmrr->end_address + 1);
}

#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_DMAR_FLOPPY_WA */

/* Initialize each context entry as pass through. */
static int __init init_context_pass_through(void)
{
	struct pci_dev *pdev = NULL;
	struct dmar_domain *domain;
	int ret;

	for_each_pci_dev(pdev) {
		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
		if (!domain)	/* domain allocation can fail; don't oops */
			return -ENOMEM;
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_PASS_THROUGH);
		if (ret)
			return ret;
	}
	return 0;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

static int si_domain_init(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;
	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	unsigned long flags;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
		       pci_name(pdev));

		ret = domain_context_mapping(si_domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret)
			return ret;
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}

int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;
	int pass_through = 1;

	/*
	 * If pass through cannot be enabled, the IOMMU falls back to
	 * identity mapping.
	 */
	if (iommu_pass_through)
		iommu_identity_mapping = 1;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs. Need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			pass_through = 0;
	}
	if (iommu_pass_through)
		if (!pass_through) {
			printk(KERN_INFO
			       "Pass Through is not supported by hardware.\n");
			iommu_pass_through = 0;
		}

	/*
	 * Start from a sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * If pass through is set and enabled, context entries of all pci
	 * devices are initialized with the pass through translation type.
	 */
	if (iommu_pass_through) {
		ret = init_context_pass_through();
		if (ret) {
			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
			iommu_pass_through = 0;
		}
	}

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx, and isa, and possibly fall
	 * back to static identity mapping if iommu_identity_mapping is set.
	 */
	if (!iommu_pass_through) {
		if (iommu_identity_mapping)
			iommu_prepare_static_identity_mapping();
		/*
		 * For each rmrr
		 *   for each dev attached to rmrr
		 *   do
		 *     locate drhd for dev, alloc domain for dev
		 *     allocate free domain
		 *     allocate page table entries for rmrr
		 *     if context not allocated for bus
		 *           allocate and init context
		 *           set present in root table for this bus
		 *     init context with domain, translation etc
		 *    endfor
		 * endfor
		 */
		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
		for_each_rmrr_units(rmrr) {
			for (i = 0; i < rmrr->devices_cnt; i++) {
				pdev = rmrr->devices[i];
				/*
				 * some BIOSes list non-existent devices in
				 * the DMAR table.
				 */
				if (!pdev)
					continue;
				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
				if (ret)
					printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
			}
		}

		iommu_prepare_isa();
	}

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
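
/*
 * Illustrative sketch (not part of this driver): init_dmars() only
 * leaves pass-through enabled when *every* unit advertises it, i.e. it
 * computes a logical AND of one ECAP bit across all IOMMUs. The same
 * pattern, with hypothetical names, over an array of units:
 */
static inline int example_all_units_support(struct intel_iommu **iommus,
					    int nr_units,
					    int (*has_feature)(struct intel_iommu *))
{
	int i;

	for (i = 0; i < nr_units; i++)
		if (!has_feature(iommus[i]))
			return 0;	/* one dissenter disables the feature */
	return 1;
}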

static inline u64 aligned_size(u64 host_addr, size_t size)
{
	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}
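
/*
 * Illustrative sketch (not part of this driver): aligned_size() returns
 * the page-rounded length of a transfer including the in-page offset of
 * its start, so partially covered first and last pages are counted.
 * Hypothetical sanity check:
 */
static inline int example_aligned_size_ok(void)
{
	/* 0x20 bytes starting at offset 0xff0 straddle two 4KiB pages */
	return aligned_size(0x1ff0, 0x20) == 0x2000;
}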

struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}

static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from the higher range
		 */
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
		return NULL;
	}

	return iova;
}
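
/*
 * Illustrative sketch (not part of this driver): the allocation policy
 * of __intel_alloc_iova() above, minus the forcedac special case. The
 * 32-bit space is tried first so that addresses reachable by
 * legacy-limited devices are not consumed by 64-bit capable ones:
 */
static inline struct iova *example_alloc_policy(struct dmar_domain *domain,
						size_t size, u64 dma_mask)
{
	struct iova *iova = NULL;

	if (dma_mask > DMA_BIT_MASK(32))
		iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
	if (!iova)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	return iova;
}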

static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed\n",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through the non-identity map and unmap path. */
static int iommu_no_mapping(struct pci_dev *pdev)
{
	int found;

	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	found = identity_mapping(pdev);
	if (found) {
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;
		else {
			/*
			 * A 32-bit DMA device is removed from si_domain
			 * and falls back to non-identity mapping.
			 */
			domain_remove_one_dev_info(si_domain, pdev);
			printk(KERN_INFO "32bit %s uses non-identity mapping\n",
			       pci_name(pdev));
			return 0;
		}
	} else {
		/*
		 * A 64-bit DMA device detached from a vm is put back
		 * into si_domain for identity mapping.
		 */
		if (pdev->dma_mask > DMA_BIT_MASK(32)) {
			int ret;
			ret = domain_add_dev_info(si_domain, pdev);
			if (!ret) {
				printk(KERN_INFO "64bit %s uses identity mapping\n",
				       pci_name(pdev));
				return 1;
			}
		}
	}

	return iommu_dummy(pdev);
}

static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);

	if (iommu_no_mapping(pdev))
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size(paddr, size) >> VTD_PAGE_SHIFT;

	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
	if (!iova)
		goto error;

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * [paddr, paddr + size) might span partial pages; map whole pages.
	 * Note: if two parts of one page are separately mapped, we might
	 * have two guest_addr mappings to the same host paddr, but this
	 * is not a big problem
	 */
	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
				 paddr >> VTD_PAGE_SHIFT, size, prot);
	if (ret)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_paddr, size);
	else
		iommu_flush_write_buffer(iommu);

	return start_paddr + (paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}

static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (!deferred_flush[i].next)
			continue;

		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		for (j = 0; j < deferred_flush[i].next; j++) {
			unsigned long mask;
			struct iova *iova = deferred_flush[i].iova[j];

			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
			mask = ilog2(mask >> VTD_PAGE_SHIFT);
			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
					iova->pfn_lo << PAGE_SHIFT, mask);
			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
		}
		deferred_flush[i].next = 0;
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
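
/*
 * Illustrative sketch (not part of this driver): the deferred-unmap
 * scheme above in miniature. Releases are batched and retired by a
 * single global flush, either when the batch hits its high-water mark
 * (as in add_unmap()) or when the timer fires. All names below are
 * hypothetical.
 */
struct example_flush_batch {
	void *entries[16];
	int next;
};

static inline void example_queue_free(struct example_flush_batch *batch,
				      void *entry,
				      void (*flush_all)(void),
				      void (*release)(void *))
{
	if (batch->next == 16) {	/* high-water mark: drain now */
		int i;

		flush_all();		/* one IOTLB flush covers the batch */
		for (i = 0; i < batch->next; i++)
			release(batch->entries[i]);
		batch->next = 0;
	}
	batch->entries[batch->next++] = entry;
}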

static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
		 pci_name(pdev), start_pfn, last_pfn);

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id,
				      start_pfn << VTD_PAGE_SHIFT,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save roughly 1/6th
		 * of the cpu used up by the iotlb flush operation...
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}

static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(pdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	iommu_flush_iotlb_psi(iommu, domain->id,
			      start_pfn << VTD_PAGE_SHIFT,
			      (last_pfn - start_pfn + 1));

	/* free iova */
	__free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	phys_addr_t addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(pdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = page_to_phys(sg_page(sg)) + sg->offset;
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
					  ((u64)addr) & PHYSICAL_PAGE_MASK,
					  size, prot);
		if (ret) {
			/* clear the page */
			dma_pte_clear_range(domain,
					    start_addr >> VTD_PAGE_SHIFT,
					    (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr >> VTD_PAGE_SHIFT,
					       (start_addr + offset - 1) >> VTD_PAGE_SHIFT);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				  ((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_addr,
				      offset >> VTD_PAGE_SHIFT);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}
2827
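/*
 * The mapping paths above never hand out a DMA address of 0 (the low
 * IOVA range is reserved), so 0 serves as the error cookie checked
 * via dma_mapping_error().
 */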
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

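/*
 * With dma_ops pointing at intel_dma_ops (set up in intel_iommu_init()
 * below), the generic DMA API is remapped transparently.  A rough
 * driver-side sketch, assuming a hypothetical driver holding a valid
 * scatterlist 'sgl' with 'nents' entries:
 *
 *	int n = dma_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -ENOMEM;	 (IOVA exhausted or no domain)
 *	(program the device using sg_dma_address()/sg_dma_len())
 *	dma_unmap_sg(&pdev->dev, sgl, n, DMA_TO_DEVICE);
 *
 * dma_map_sg() dispatches to intel_map_sg() above.
 */

/*
 * Dedicated slab caches for the three frequently-allocated objects:
 * dmar_domains, device_domain_info structures and struct iova nodes.
 * SLAB_HWCACHE_ALIGN keeps each object cache-line aligned.
 */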
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

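/*
 * Mark DMAR units with nothing useful to translate: units whose device
 * scope matches no present PCI device and, unless dmar_map_gfx is set,
 * units that cover only graphics devices.  Devices behind a unit
 * ignored for the gfx case are tagged with dummy domain info so the
 * mapping paths bypass translation for them.
 */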
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

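/*
 * Suspend/resume support.  Only the fault-event control/address
 * registers are snapshotted; all other hardware state is reprogrammed
 * from scratch by init_iommu_hw() on resume.
 */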
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

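/*
 * Suspend sequence: flush every context/IOTLB cache, disable
 * translation, then save the fault-event registers of each active
 * IOMMU with its register_lock held and interrupts off.
 */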
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {
		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

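/*
 * Main initialization entry point.  Ordering matters: the DMAR table
 * and device scopes are parsed first (and reused by interrupt
 * remapping), mempools and reserved IOVA ranges come next, and
 * init_dmars() runs last so that it sees the final set of non-ignored
 * DMAR units.
 */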
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

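/*
 * Transactions from devices behind a PCIe-to-PCI bridge carry the
 * bridge's source-id rather than the device's own, so the context
 * entries programmed for the bridges on the upstream path have to be
 * torn down along with the device itself.
 */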
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

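/*
 * Detach one device from a domain.  If it was the last device on its
 * IOMMU owned by this domain, that IOMMU is dropped from the domain's
 * bitmap and the cached iommu count and capabilities are recomputed.
 */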
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu in iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/*
		 * Clear this iommu in iommu_bmp, update the iommu count
		 * and capabilities.
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine; it won't be set in the context entry */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

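/*
 * Generic IOMMU-API glue (struct iommu_ops).  These callbacks expose
 * the hardware through <linux/iommu.h> to exterior users such as KVM
 * device assignment, backed by the "virtual machine" domains above.
 */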
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

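/*
 * Map an arbitrary range of guest-physical addresses.  Growing the
 * domain's max_addr forces a recheck that the smallest AGAW among the
 * attached IOMMUs still covers the new top address; the size is then
 * rounded up so the low bits of hpa cannot truncate the last page.
 */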
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_size(hpa, size) >> VTD_PAGE_SHIFT;
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,
	.unmap		= intel_iommu_unmap_range,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

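/*
 * Setting rwbf_quirk forces iommu_flush_write_buffer() to act even
 * when the capability register claims write-buffer flushing is not
 * required.  The fixup below applies it to the Mobile 4 Series
 * chipset (PCI device 0x2a40).
 */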
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);