/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

#ifndef PHYSICAL_PAGE_MASK
#define PHYSICAL_PAGE_MASK PAGE_MASK
#endif

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
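
/*
 * The root table is indexed by PCI bus number; each present root entry
 * points to a 4KB context table of 256 16-byte entries, indexed by devfn.
 */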

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return (pte->val & VTD_PAGE_MASK);
}

static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	pte->val |= (addr & VTD_PAGE_MASK);
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}
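
/* A pte is "present" when either its read or its write bit is set. */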

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	spinlock_t	mapping_lock;	/* page table lock */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
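
/*
 * Example usage: options are comma-separated and may be combined, e.g.
 * booting with "intel_iommu=on,igfx_off,strict" enables the IOMMU but
 * leaves the graphics device untranslated and disables batched flushing.
 */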

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
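
/*
 * Note the restore above: "flags" holds only the caller's original
 * PF_MEMALLOC bit, so "current->flags &= (~PF_MEMALLOC | flags)" clears
 * PF_MEMALLOC only when the caller did not already have it set.
 */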


static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void * alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn], \
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;

}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
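
/*
 * Example: for the default 48-bit domain width,
 * width_to_agaw(48) = (48 - 30) / 9 = 2 and agaw_to_level(2) = 4,
 * i.e. a four-level page table covering a 48-bit address space.
 */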

static inline unsigned int level_to_offset_bits(int level)
{
	return (12 + (level - 1) * LEVEL_STRIDE);
}

static inline int address_level_offset(u64 addr, int level)
{
	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
}

static inline u64 level_mask(int level)
{
	return ((u64)-1 << level_to_offset_bits(level));
}

static inline u64 level_size(int level)
{
	return ((u64)1 << level_to_offset_bits(level));
}

static inline u64 align_to_level(u64 addr, int level)
{
	return ((addr + level_size(level) - 1) & level_mask(level));
}
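
/*
 * Example: a level-2 entry covers level_to_offset_bits(2) = 21 bits,
 * so level_size(2) is 2MB and align_to_level(addr, 2) rounds addr up
 * to the next 2MB boundary.
 */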

static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;
	unsigned long flags;

	BUG_ON(!domain->pgd);

	addr &= (((u64)1) << addr_width) - 1;
	parent = domain->pgd;

	spin_lock_irqsave(&domain->mapping_lock, flags);
	while (level > 0) {
		void *tmp_page;

		offset = address_level_offset(addr, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			tmp_page = alloc_pgtable_page();

			if (!tmp_page) {
				spin_unlock_irqrestore(&domain->mapping_lock,
					flags);
				return NULL;
			}
			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
			/*
			 * high level table always sets r/w, last level page
			 * table controls read/write
			 */
			dma_set_pte_readable(pte);
			dma_set_pte_writable(pte);
			domain_flush_cache(domain, pte, sizeof(*pte));
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	spin_unlock_irqrestore(&domain->mapping_lock, flags);
	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
		int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = address_level_offset(addr, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear one page's page table */
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
	struct dma_pte *pte = NULL;

	/* get last level pte */
	pte = dma_addr_level_pte(domain, addr, 1);

	if (pte) {
		dma_clear_pte(pte);
		domain_flush_cache(domain, pte, sizeof(*pte));
	}
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	int npages;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;
	/* in case it's partial page */
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	npages = (end - start) / VTD_PAGE_SIZE;

	/* we don't need lock here, nobody else touches the iova range */
	while (npages--) {
		dma_pte_clear_one(domain, start);
		start += VTD_PAGE_SIZE;
	}
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
	u64 start, u64 end)
{
	int addr_width = agaw_to_width(domain->agaw);
	struct dma_pte *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	u64 tmp;

	start &= (((u64)1) << addr_width) - 1;
	end &= (((u64)1) << addr_width) - 1;

	/* we don't need lock here, nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start, level);
		if (tmp >= end || (tmp + level_size(level) > end))
			return;

		while (tmp < end) {
			pte = dma_addr_level_pte(domain, tmp, level);
			if (pte) {
				free_pgtable_page(
					phys_to_virt(dma_pte_addr(pte)));
				dma_clear_pte(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
			tmp += level_size(level);
		}
		level++;
	}
	/* free pgd */
	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
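
/*
 * The source-id passed to qi_flush_dev_iotlb() above (bus << 8 | devfn)
 * is the device's 16-bit PCI requester ID.
 */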

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  u64 addr, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));

	BUG_ON(addr & (~VTD_PAGE_MASK));
	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);
	if (did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
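
/*
 * Example: pages = 9 gives mask = ilog2(__roundup_pow_of_two(9)) = 4,
 * i.e. a page-selective invalidation covering 2^4 = 16 pages.
 */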

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		return -ENOMEM;
	}

	spin_lock_init(&iommu->lock);

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	for (; i < cap_ndoms(iommu->cap); ) {
		domain = iommu->domains[i];
		clear_bit(i, iommu->domain_ids);

		spin_lock_irqsave(&domain->iommu_lock, flags);
		if (--domain->iommu_count == 0) {
			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
				vm_domain_exit(domain);
			else
				domain_exit(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags);

		i = find_next_bit(iommu->domain_ids,
			cap_ndoms(iommu->cap), i+1);
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;
	u64 addr, size;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
		&reserved_alloc_key);
	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			addr = r->start;
			addr &= PHYSICAL_PAGE_MASK;
			size = r->end - addr;
			size = PAGE_ALIGN(size);
			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
				IOVA_PFN(size + addr) - 1);
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}

}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}
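
/*
 * Example: guestwidth_to_adjustwidth(32) gives r = (32 - 12) % 9 = 2,
 * so the width is rounded up to 32 + 9 - 2 = 39 bits, i.e. a whole
 * number of 9-bit page-table levels above the 12-bit page offset.
 */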
1336
1337static int domain_init(struct dmar_domain *domain, int guest_width)
1338{
1339 struct intel_iommu *iommu;
1340 int adjust_width, agaw;
1341 unsigned long sagaw;
1342
David Millerf6611972008-02-06 01:36:23 -08001343 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001344 spin_lock_init(&domain->mapping_lock);
Weidong Hanc7151a82008-12-08 22:51:37 +08001345 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001346
1347 domain_reserve_special_ranges(domain);
1348
1349 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001350 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001351 if (guest_width > cap_mgaw(iommu->cap))
1352 guest_width = cap_mgaw(iommu->cap);
1353 domain->gaw = guest_width;
1354 adjust_width = guestwidth_to_adjustwidth(guest_width);
1355 agaw = width_to_agaw(adjust_width);
1356 sagaw = cap_sagaw(iommu->cap);
1357 if (!test_bit(agaw, &sagaw)) {
1358 /* hardware doesn't support it, choose a bigger one */
1359 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1360 agaw = find_next_bit(&sagaw, 5, agaw);
1361 if (agaw >= 5)
1362 return -ENODEV;
1363 }
1364 domain->agaw = agaw;
1365 INIT_LIST_HEAD(&domain->devices);
1366
Weidong Han8e6040972008-12-08 15:49:06 +08001367 if (ecap_coherent(iommu->ecap))
1368 domain->iommu_coherency = 1;
1369 else
1370 domain->iommu_coherency = 0;
1371
Sheng Yang58c610b2009-03-18 15:33:05 +08001372 if (ecap_sc_support(iommu->ecap))
1373 domain->iommu_snooping = 1;
1374 else
1375 domain->iommu_snooping = 0;
1376
Weidong Hanc7151a82008-12-08 22:51:37 +08001377 domain->iommu_count = 1;
1378
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001379 /* always allocate the top pgd */
1380 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1381 if (!domain->pgd)
1382 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001383 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001384 return 0;
1385}
1386
1387static void domain_exit(struct dmar_domain *domain)
1388{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001389 struct dmar_drhd_unit *drhd;
1390 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001391 u64 end;
1392
1393 /* Domain 0 is reserved, so dont process it */
1394 if (!domain)
1395 return;
1396
1397 domain_remove_dev_info(domain);
1398 /* destroy iovas */
1399 put_iova_domain(&domain->iovad);
1400 end = DOMAIN_MAX_ADDR(domain->gaw);
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001401 end = end & (~PAGE_MASK);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001402
1403 /* clear ptes */
1404 dma_pte_clear_range(domain, 0, end);
1405
1406 /* free page tables */
1407 dma_pte_free_pagetable(domain, 0, end);
1408
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001409 for_each_active_iommu(iommu, drhd)
1410 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1411 iommu_detach_domain(domain, iommu);
1412
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001413 free_domain_mem(domain);
1414}
1415
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001416static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1417 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418{
1419 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001420 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001421 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001422 struct dma_pte *pgd;
1423 unsigned long num;
1424 unsigned long ndomains;
1425 int id;
1426 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001427 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001428
1429 pr_debug("Set context mapping for %02x:%02x.%d\n",
1430 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001431
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001432 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001433 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1434 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001435
David Woodhouse276dbf992009-04-04 01:45:37 +01001436 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001437 if (!iommu)
1438 return -ENODEV;
1439
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001440 context = device_to_context_entry(iommu, bus, devfn);
1441 if (!context)
1442 return -ENOMEM;
1443 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001444 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001445 spin_unlock_irqrestore(&iommu->lock, flags);
1446 return 0;
1447 }
1448
Weidong Hanea6606b2008-12-08 23:08:15 +08001449 id = domain->id;
1450 pgd = domain->pgd;
1451
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001452 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1453 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001454 int found = 0;
1455
1456 /* find an available domain id for this device in iommu */
1457 ndomains = cap_ndoms(iommu->cap);
1458 num = find_first_bit(iommu->domain_ids, ndomains);
1459 for (; num < ndomains; ) {
1460 if (iommu->domains[num] == domain) {
1461 id = num;
1462 found = 1;
1463 break;
1464 }
1465 num = find_next_bit(iommu->domain_ids,
1466 cap_ndoms(iommu->cap), num+1);
1467 }
1468
1469 if (found == 0) {
1470 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1471 if (num >= ndomains) {
1472 spin_unlock_irqrestore(&iommu->lock, flags);
1473 printk(KERN_ERR "IOMMU: no free domain ids\n");
1474 return -EFAULT;
1475 }
1476
1477 set_bit(num, iommu->domain_ids);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001478 set_bit(iommu->seq_id, &domain->iommu_bmp);
Weidong Hanea6606b2008-12-08 23:08:15 +08001479 iommu->domains[num] = domain;
1480 id = num;
1481 }
1482
1483 /* Skip top levels of page tables for
1484 * iommu which has less agaw than default.
1485 */
1486 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1487 pgd = phys_to_virt(dma_pte_addr(pgd));
1488 if (!dma_pte_present(pgd)) {
1489 spin_unlock_irqrestore(&iommu->lock, flags);
1490 return -ENOMEM;
1491 }
1492 }
1493 }
1494
1495 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001496
Yu Zhao93a23a72009-05-18 13:51:37 +08001497 if (translation != CONTEXT_TT_PASS_THROUGH) {
1498 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1499 translation = info ? CONTEXT_TT_DEV_IOTLB :
1500 CONTEXT_TT_MULTI_LEVEL;
1501 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001502 /*
1503 * In pass through mode, AW must be programmed to indicate the largest
1504 * AGAW value supported by hardware. And ASR is ignored by hardware.
1505 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001506 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001507 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001508 else {
1509 context_set_address_root(context, virt_to_phys(pgd));
1510 context_set_address_width(context, iommu->agaw);
1511 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001512
1513 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001514 context_set_fault_enable(context);
1515 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001516 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001517
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001518 /*
1519 * It's a non-present to present mapping. If hardware doesn't cache
1520 * non-present entry we only need to flush the write-buffer. If the
1521 * _does_ cache non-present entries, then it does so in the special
1522 * domain #0, which we have to flush:
1523 */
1524 if (cap_caching_mode(iommu->cap)) {
1525 iommu->flush.flush_context(iommu, 0,
1526 (((u16)bus) << 8) | devfn,
1527 DMA_CCMD_MASK_NOBIT,
1528 DMA_CCMD_DEVICE_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001529 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001530 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001531 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001532 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001533 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001534 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001535
1536 spin_lock_irqsave(&domain->iommu_lock, flags);
1537 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1538 domain->iommu_count++;
Sheng Yang58c610b2009-03-18 15:33:05 +08001539 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001540 }
1541 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001542 return 0;
1543}
1544
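/*
 * Set up context entries for @pdev itself and for every bridge on the
 * path up to the topmost PCIe-to-PCI bridge, so that transactions the
 * bridges issue on the device's behalf are translated by @domain too.
 */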
1545static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001546domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1547 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001548{
1549 int ret;
1550 struct pci_dev *tmp, *parent;
1551
David Woodhouse276dbf992009-04-04 01:45:37 +01001552 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001553 pdev->bus->number, pdev->devfn,
1554 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 if (ret)
1556 return ret;
1557
1558 /* dependent device mapping */
1559 tmp = pci_find_upstream_pcie_bridge(pdev);
1560 if (!tmp)
1561 return 0;
1562 /* Secondary interface's bus number and devfn 0 */
1563 parent = pdev->bus->self;
1564 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001565 ret = domain_context_mapping_one(domain,
1566 pci_domain_nr(parent->bus),
1567 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001568 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001569 if (ret)
1570 return ret;
1571 parent = parent->bus->self;
1572 }
1573 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1574 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001575 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001576 tmp->subordinate->number, 0,
1577 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001578 else /* this is a legacy PCI bridge */
1579 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001580 pci_domain_nr(tmp->bus),
1581 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001582 tmp->devfn,
1583 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001584}
1585
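/*
 * Check whether @pdev and every bridge on its upstream path already
 * have context entries programmed.
 */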
Weidong Han5331fe62008-12-08 23:00:00 +08001586static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001587{
1588 int ret;
1589 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001590 struct intel_iommu *iommu;
1591
David Woodhouse276dbf992009-04-04 01:45:37 +01001592 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1593 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001594 if (!iommu)
1595 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001596
David Woodhouse276dbf992009-04-04 01:45:37 +01001597 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001598 if (!ret)
1599 return ret;
1600 /* dependent device mapping */
1601 tmp = pci_find_upstream_pcie_bridge(pdev);
1602 if (!tmp)
1603 return ret;
1604 /* Secondary interface's bus number and devfn 0 */
1605 parent = pdev->bus->self;
1606 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001607 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001608 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001609 if (!ret)
1610 return ret;
1611 parent = parent->bus->self;
1612 }
1613 if (tmp->is_pcie)
David Woodhouse276dbf992009-04-04 01:45:37 +01001614 return device_context_mapped(iommu, tmp->subordinate->number,
1615 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001616 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001617 return device_context_mapped(iommu, tmp->bus->number,
1618 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001619}
1620
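/*
 * Map the iova range to [hpa, hpa + size) one VTD page at a time.
 * Worked example (illustrative values only): hpa = 0x1234, size = 0x2000
 * gives start_pfn = 1 and end_pfn = 4, so pfns 1..3 are mapped and the
 * unaligned head and tail are rounded out to whole pages.
 */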
1621static int
1622domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1623 u64 hpa, size_t size, int prot)
1624{
1625 u64 start_pfn, end_pfn;
1626 struct dma_pte *pte;
1627 int index;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001628 int addr_width = agaw_to_width(domain->agaw);
1629
1630 hpa &= (((u64)1) << addr_width) - 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001631
1632 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1633 return -EINVAL;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001634 iova &= PAGE_MASK;
1635 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1636 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001637 index = 0;
1638 while (start_pfn < end_pfn) {
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001639 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640 if (!pte)
1641 return -ENOMEM;
1642		/* We don't need a lock here; nobody else
1643		 * touches this iova range.
1644 */
Mark McLoughlin19c239c2008-11-21 16:56:53 +00001645 BUG_ON(dma_pte_addr(pte));
1646 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1647 dma_set_pte_prot(pte, prot);
Sheng Yang9cf066972009-03-18 15:33:07 +08001648 if (prot & DMA_PTE_SNP)
1649 dma_set_pte_snp(pte);
Weidong Han5331fe62008-12-08 23:00:00 +08001650 domain_flush_cache(domain, pte, sizeof(*pte));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001651 start_pfn++;
1652 index++;
1653 }
1654 return 0;
1655}
1656
Weidong Hanc7151a82008-12-08 22:51:37 +08001657static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001658{
Weidong Hanc7151a82008-12-08 22:51:37 +08001659 if (!iommu)
1660 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001661
1662 clear_context_table(iommu, bus, devfn);
1663 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001664 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001665 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001666}
1667
1668static void domain_remove_dev_info(struct dmar_domain *domain)
1669{
1670 struct device_domain_info *info;
1671 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001672 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001673
1674 spin_lock_irqsave(&device_domain_lock, flags);
1675 while (!list_empty(&domain->devices)) {
1676 info = list_entry(domain->devices.next,
1677 struct device_domain_info, link);
1678 list_del(&info->link);
1679 list_del(&info->global);
1680 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001681 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001682 spin_unlock_irqrestore(&device_domain_lock, flags);
1683
Yu Zhao93a23a72009-05-18 13:51:37 +08001684 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001685 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001686 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001687 free_devinfo_mem(info);
1688
1689 spin_lock_irqsave(&device_domain_lock, flags);
1690 }
1691 spin_unlock_irqrestore(&device_domain_lock, flags);
1692}
1693
1694/*
1695 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001696 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697 */
Kay, Allen M38717942008-09-09 18:37:29 +03001698static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001699find_domain(struct pci_dev *pdev)
1700{
1701 struct device_domain_info *info;
1702
1703	/* No lock here; assumes no domain exits in the normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001704 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001705 if (info)
1706 return info->domain;
1707 return NULL;
1708}
1709
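/*
 * Return the domain for @pdev: first the one cached in
 * dev.archdata.iommu, then one already owned by the upstream
 * PCIe-to-PCI bridge, and otherwise a freshly allocated and
 * initialized domain.  Races with concurrent callers are resolved
 * under device_domain_lock.
 */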
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001710/* domain is initialized */
1711static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1712{
1713 struct dmar_domain *domain, *found = NULL;
1714 struct intel_iommu *iommu;
1715 struct dmar_drhd_unit *drhd;
1716 struct device_domain_info *info, *tmp;
1717 struct pci_dev *dev_tmp;
1718 unsigned long flags;
1719 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001720 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001721 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001722
1723 domain = find_domain(pdev);
1724 if (domain)
1725 return domain;
1726
David Woodhouse276dbf992009-04-04 01:45:37 +01001727 segment = pci_domain_nr(pdev->bus);
1728
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001729 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1730 if (dev_tmp) {
1731 if (dev_tmp->is_pcie) {
1732 bus = dev_tmp->subordinate->number;
1733 devfn = 0;
1734 } else {
1735 bus = dev_tmp->bus->number;
1736 devfn = dev_tmp->devfn;
1737 }
1738 spin_lock_irqsave(&device_domain_lock, flags);
1739 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001740 if (info->segment == segment &&
1741 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001742 found = info->domain;
1743 break;
1744 }
1745 }
1746 spin_unlock_irqrestore(&device_domain_lock, flags);
1747		/* the pcie-pci bridge already has a domain; use it */
1748 if (found) {
1749 domain = found;
1750 goto found_domain;
1751 }
1752 }
1753
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001754	/* Find the DMAR unit first so a failed lookup does not leak a domain */
1755	drhd = dmar_find_matched_drhd_unit(pdev);
1756	if (!drhd) {
1757		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1758			pci_name(pdev));
1759		return NULL;
1760	}
1761	iommu = drhd->iommu;
1762
1763	/* Allocate new domain for the device */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001764	domain = alloc_domain();
1765	if (!domain)
1766		goto error;
1766
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001767 ret = iommu_attach_domain(domain, iommu);
1768 if (ret) {
1769 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001770 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001771 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001772
1773 if (domain_init(domain, gaw)) {
1774 domain_exit(domain);
1775 goto error;
1776 }
1777
1778 /* register pcie-to-pci device */
1779 if (dev_tmp) {
1780 info = alloc_devinfo_mem();
1781 if (!info) {
1782 domain_exit(domain);
1783 goto error;
1784 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001785 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001786 info->bus = bus;
1787 info->devfn = devfn;
1788 info->dev = NULL;
1789 info->domain = domain;
1790 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001791 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792
1793		/* the pcie-to-pci bridge already has a domain; use it */
1794 found = NULL;
1795 spin_lock_irqsave(&device_domain_lock, flags);
1796 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001797 if (tmp->segment == segment &&
1798 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799 found = tmp->domain;
1800 break;
1801 }
1802 }
1803 if (found) {
1804 free_devinfo_mem(info);
1805 domain_exit(domain);
1806 domain = found;
1807 } else {
1808 list_add(&info->link, &domain->devices);
1809 list_add(&info->global, &device_domain_list);
1810 }
1811 spin_unlock_irqrestore(&device_domain_lock, flags);
1812 }
1813
1814found_domain:
1815 info = alloc_devinfo_mem();
1816 if (!info)
1817 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001818 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001819 info->bus = pdev->bus->number;
1820 info->devfn = pdev->devfn;
1821 info->dev = pdev;
1822 info->domain = domain;
1823 spin_lock_irqsave(&device_domain_lock, flags);
1824	/* somebody else may have set it up already */
1825 found = find_domain(pdev);
1826 if (found != NULL) {
1827 spin_unlock_irqrestore(&device_domain_lock, flags);
1828 if (found != domain) {
1829 domain_exit(domain);
1830 domain = found;
1831 }
1832 free_devinfo_mem(info);
1833 return domain;
1834 }
1835 list_add(&info->link, &domain->devices);
1836 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001837 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001838 spin_unlock_irqrestore(&device_domain_lock, flags);
1839 return domain;
1840error:
1841	/* recheck here; another thread may have set it */
1842 return find_domain(pdev);
1843}
1844
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001845static int iommu_identity_mapping;
1846
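/*
 * Identity-map [start, end) for @pdev: reserve the matching iova range
 * and install 1:1 page table entries so the device can DMA with host
 * physical addresses.  Used for RMRRs and for the gfx and ISA/floppy
 * workarounds below.
 */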
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001847static int iommu_prepare_identity_map(struct pci_dev *pdev,
1848 unsigned long long start,
1849 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001850{
1851 struct dmar_domain *domain;
1852 unsigned long size;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001853 unsigned long long base;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001854 int ret;
1855
1856 printk(KERN_INFO
1857 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1858 pci_name(pdev), start, end);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001859 if (iommu_identity_mapping)
1860 domain = si_domain;
1861 else
1862 /* page table init */
1863 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001864 if (!domain)
1865 return -ENOMEM;
1866
1867 /* The address might not be aligned */
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001868 base = start & PAGE_MASK;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001869 size = end - base;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001870 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001871 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1872 IOVA_PFN(base + size) - 1)) {
1873 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1874 ret = -ENOMEM;
1875 goto error;
1876 }
1877
1878 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1879 size, base, pci_name(pdev));
1880 /*
1881	 * The RMRR range might overlap with the physical memory range;
1882 * clear it first
1883 */
1884 dma_pte_clear_range(domain, base, base + size);
1885
1886 ret = domain_page_mapping(domain, base, base, size,
1887 DMA_PTE_READ|DMA_PTE_WRITE);
1888 if (ret)
1889 goto error;
1890
1891 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001892 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001893 if (!ret)
1894 return 0;
1895error:
1896	/* don't tear down the shared si_domain on a per-device failure */
1897	if (!iommu_identity_mapping)
1898		domain_exit(domain);
1897 return ret;
1898
1899}
1900
1901static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1902 struct pci_dev *pdev)
1903{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001904 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001905 return 0;
1906 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1907 rmrr->end_address + 1);
1908}
1909
Yinghai Lud52d53b2008-06-16 20:10:55 -07001910struct iommu_prepare_data {
1911 struct pci_dev *pdev;
1912 int ret;
1913};
1914
1915static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1916 unsigned long end_pfn, void *datax)
1917{
1918 struct iommu_prepare_data *data;
1919
1920 data = (struct iommu_prepare_data *)datax;
1921
1922 data->ret = iommu_prepare_identity_map(data->pdev,
1923 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1924 return data->ret;
1925
1926}
1927
1928static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1929{
1930 int nid;
1931 struct iommu_prepare_data data;
1932
1933 data.pdev = pdev;
1934 data.ret = 0;
1935
1936 for_each_online_node(nid) {
1937 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1938 if (data.ret)
1939 return data.ret;
1940 }
1941 return data.ret;
1942}
1943
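/*
 * Graphics workaround: when CONFIG_DMAR_GFX_WA is set, give every gfx
 * device a 1:1 mapping of all active memory regions, since such
 * devices are commonly handed raw physical addresses.
 */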
Chris Wright7e25a242009-06-25 18:52:05 -07001944#ifdef CONFIG_DMAR_GFX_WA
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001945static void __init iommu_prepare_gfx_mapping(void)
1946{
1947 struct pci_dev *pdev = NULL;
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001948 int ret;
1949
1950 for_each_pci_dev(pdev) {
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001951 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001952 !IS_GFX_DEVICE(pdev))
1953 continue;
1954 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1955 pci_name(pdev));
Yinghai Lud52d53b2008-06-16 20:10:55 -07001956 ret = iommu_prepare_with_active_regions(pdev);
1957 if (ret)
1958 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001959 }
1960}
Mark McLoughlin2abd7e12008-11-20 15:49:50 +00001961#else /* !CONFIG_DMAR_GFX_WA */
1962static inline void iommu_prepare_gfx_mapping(void)
1963{
1964 return;
1965}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07001966#endif
1967
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001968#ifdef CONFIG_DMAR_FLOPPY_WA
1969static inline void iommu_prepare_isa(void)
1970{
1971 struct pci_dev *pdev;
1972 int ret;
1973
1974 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1975 if (!pdev)
1976 return;
1977
1978 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1979 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1980
1981 if (ret)
Frank Seidel1c35b8e2009-02-06 10:23:36 +01001982		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001983 "floppy might not work\n");
1984
1985}
1986#else
1987static inline void iommu_prepare_isa(void)
1988{
1989 return;
1990}
1991#endif /* !CONFIG_DMAR_FLOPPY_WA */
1992
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001993/* Initialize each context entry as pass through. */
1994static int __init init_context_pass_through(void)
1995{
1996 struct pci_dev *pdev = NULL;
1997 struct dmar_domain *domain;
1998 int ret;
1999
2000 for_each_pci_dev(pdev) {
2001 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2002 ret = domain_context_mapping(domain, pdev,
2003 CONTEXT_TT_PASS_THROUGH);
2004 if (ret)
2005 return ret;
2006 }
2007 return 0;
2008}
2009
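/*
 * si_domain is the single static-identity domain shared by all devices
 * that use identity mapping; si_domain_init() attaches it to every
 * active iommu and sets it up with the default address width.
 */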
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002010static int md_domain_init(struct dmar_domain *domain, int guest_width);
2011static int si_domain_init(void)
2012{
2013 struct dmar_drhd_unit *drhd;
2014 struct intel_iommu *iommu;
2015 int ret = 0;
2016
2017 si_domain = alloc_domain();
2018 if (!si_domain)
2019 return -EFAULT;
2020
2021
2022 for_each_active_iommu(iommu, drhd) {
2023 ret = iommu_attach_domain(si_domain, iommu);
2024 if (ret) {
2025 domain_exit(si_domain);
2026 return -EFAULT;
2027 }
2028 }
2029
2030 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2031 domain_exit(si_domain);
2032 return -EFAULT;
2033 }
2034
2035 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2036
2037 return 0;
2038}
2039
2040static void domain_remove_one_dev_info(struct dmar_domain *domain,
2041 struct pci_dev *pdev);
2042static int identity_mapping(struct pci_dev *pdev)
2043{
2044 struct device_domain_info *info;
2045
2046 if (likely(!iommu_identity_mapping))
2047 return 0;
2048
2049
2050 list_for_each_entry(info, &si_domain->devices, link)
2051 if (info->dev == pdev)
2052 return 1;
2053 return 0;
2054}
2055
2056static int domain_add_dev_info(struct dmar_domain *domain,
2057 struct pci_dev *pdev)
2058{
2059 struct device_domain_info *info;
2060 unsigned long flags;
2061
2062 info = alloc_devinfo_mem();
2063 if (!info)
2064 return -ENOMEM;
2065
2066 info->segment = pci_domain_nr(pdev->bus);
2067 info->bus = pdev->bus->number;
2068 info->devfn = pdev->devfn;
2069 info->dev = pdev;
2070 info->domain = domain;
2071
2072 spin_lock_irqsave(&device_domain_lock, flags);
2073 list_add(&info->link, &domain->devices);
2074 list_add(&info->global, &device_domain_list);
2075 pdev->dev.archdata.iommu = info;
2076 spin_unlock_irqrestore(&device_domain_lock, flags);
2077
2078 return 0;
2079}
2080
2081static int iommu_prepare_static_identity_mapping(void)
2082{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002083 struct pci_dev *pdev = NULL;
2084 int ret;
2085
2086 ret = si_domain_init();
2087 if (ret)
2088 return -EFAULT;
2089
2090 printk(KERN_INFO "IOMMU: Setting identity map:\n");
2091 for_each_pci_dev(pdev) {
Chris Wright7e25a242009-06-25 18:52:05 -07002092 ret = iommu_prepare_with_active_regions(pdev);
2093 if (ret) {
2094 printk(KERN_INFO "1:1 mapping to one domain failed.\n");
2095 return -EFAULT;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002096 }
2097 ret = domain_add_dev_info(si_domain, pdev);
2098 if (ret)
2099 return ret;
2100 }
2101
2102 return 0;
2103}
2104
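/*
 * init_dmars() takes every DMAR unit from a clean state to full
 * translation: allocate the global iommu array and per-unit domain ids
 * and root entries, pick queued vs. register-based invalidation, set up
 * pass-through or the rmrr/gfx/isa identity maps, then enable fault
 * reporting and translation on each unit.
 */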
2105int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002106{
2107 struct dmar_drhd_unit *drhd;
2108 struct dmar_rmrr_unit *rmrr;
2109 struct pci_dev *pdev;
2110 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002111 int i, ret;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002112 int pass_through = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002113
2114 /*
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002115	 * If pass through cannot be enabled, the iommu falls back to identity
2116 * mapping.
2117 */
2118 if (iommu_pass_through)
2119 iommu_identity_mapping = 1;
2120
2121 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002122 * for each drhd
2123 * allocate root
2124 * initialize and program root entry to not present
2125 * endfor
2126 */
2127 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002128 g_num_of_iommus++;
2129 /*
2130 * lock not needed as this is only incremented in the single
2131		 * threaded kernel __init code path; all other accesses are
2132		 * read only
2133 */
2134 }
2135
Weidong Hand9630fe2008-12-08 11:06:32 +08002136 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2137 GFP_KERNEL);
2138 if (!g_iommus) {
2139 printk(KERN_ERR "Allocating global iommu array failed\n");
2140 ret = -ENOMEM;
2141 goto error;
2142 }
2143
mark gross80b20dd2008-04-18 13:53:58 -07002144 deferred_flush = kzalloc(g_num_of_iommus *
2145 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2146 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002147 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08002148 ret = -ENOMEM;
2149 goto error;
2150 }
2151
mark gross5e0d2a62008-03-04 15:22:08 -08002152 for_each_drhd_unit(drhd) {
2153 if (drhd->ignored)
2154 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002155
2156 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002157 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002158
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002159 ret = iommu_init_domains(iommu);
2160 if (ret)
2161 goto error;
2162
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002163 /*
2164 * TBD:
2165 * we could share the same root & context tables
2166		 * among all IOMMUs. Need to split it later.
2167 */
2168 ret = iommu_alloc_root_entry(iommu);
2169 if (ret) {
2170 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2171 goto error;
2172 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002173 if (!ecap_pass_through(iommu->ecap))
2174 pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002175 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002176	if (iommu_pass_through && !pass_through) {
2178 printk(KERN_INFO
2179 "Pass Through is not supported by hardware.\n");
2180 iommu_pass_through = 0;
2181 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002182
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002183 /*
2184 * Start from the sane iommu hardware state.
2185 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002186 for_each_drhd_unit(drhd) {
2187 if (drhd->ignored)
2188 continue;
2189
2190 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002191
2192 /*
2193 * If the queued invalidation is already initialized by us
2194 * (for example, while enabling interrupt-remapping) then
2195 * we got the things already rolling from a sane state.
2196 */
2197 if (iommu->qi)
2198 continue;
2199
2200 /*
2201 * Clear any previous faults.
2202 */
2203 dmar_fault(-1, iommu);
2204 /*
2205 * Disable queued invalidation if supported and already enabled
2206 * before OS handover.
2207 */
2208 dmar_disable_qi(iommu);
2209 }
2210
2211 for_each_drhd_unit(drhd) {
2212 if (drhd->ignored)
2213 continue;
2214
2215 iommu = drhd->iommu;
2216
Youquan Songa77b67d2008-10-16 16:31:56 -07002217 if (dmar_enable_qi(iommu)) {
2218 /*
2219 * Queued Invalidate not enabled, use Register Based
2220 * Invalidate
2221 */
2222 iommu->flush.flush_context = __iommu_flush_context;
2223 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2224 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002225 "invalidation\n",
2226 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002227 } else {
2228 iommu->flush.flush_context = qi_flush_context;
2229 iommu->flush.flush_iotlb = qi_flush_iotlb;
2230 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002231 "invalidation\n",
2232 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002233 }
2234 }
2235
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002236 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002237 * If pass through is set and enabled, context entries of all pci
2238	 * devices are initialized with the pass through translation type.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002239 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002240 if (iommu_pass_through) {
2241 ret = init_context_pass_through();
2242 if (ret) {
2243 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2244 iommu_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002245 }
2246 }
2247
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002248 /*
2249	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002250	 * identity mappings for rmrr, gfx, and isa, and fall back to static
2251	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002252 */
2253 if (!iommu_pass_through) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002254 if (iommu_identity_mapping)
2255 iommu_prepare_static_identity_mapping();
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002256 /*
2257 * For each rmrr
2258 * for each dev attached to rmrr
2259 * do
2260 * locate drhd for dev, alloc domain for dev
2261 * allocate free domain
2262 * allocate page table entries for rmrr
2263 * if context not allocated for bus
2264 * allocate and init context
2265 * set present in root table for this bus
2266 * init context with domain, translation etc
2267 * endfor
2268 * endfor
2269 */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002270 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002271 for_each_rmrr_units(rmrr) {
2272 for (i = 0; i < rmrr->devices_cnt; i++) {
2273 pdev = rmrr->devices[i];
2274 /*
2275			 * some BIOSes list non-existent devices in the DMAR
2276 * table.
2277 */
2278 if (!pdev)
2279 continue;
2280 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2281 if (ret)
2282 printk(KERN_ERR
2283 "IOMMU: mapping reserved region failed\n");
2284 }
2285 }
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002286
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002287 iommu_prepare_gfx_mapping();
2288
2289 iommu_prepare_isa();
2290 }
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002291
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002292 /*
2293 * for each drhd
2294 * enable fault log
2295 * global invalidate context cache
2296 * global invalidate iotlb
2297 * enable translation
2298 */
2299 for_each_drhd_unit(drhd) {
2300 if (drhd->ignored)
2301 continue;
2302 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002303
2304 iommu_flush_write_buffer(iommu);
2305
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002306 ret = dmar_set_interrupt(iommu);
2307 if (ret)
2308 goto error;
2309
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002310 iommu_set_root_entry(iommu);
2311
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002312 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002313 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002314 iommu_disable_protect_mem_regions(iommu);
2315
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002316 ret = iommu_enable_translation(iommu);
2317 if (ret)
2318 goto error;
2319 }
2320
2321 return 0;
2322error:
2323 for_each_drhd_unit(drhd) {
2324 if (drhd->ignored)
2325 continue;
2326 iommu = drhd->iommu;
2327 free_iommu(iommu);
2328 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002329 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002330 return ret;
2331}
2332
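/*
 * aligned_size() returns the page-aligned span covering
 * [host_addr, host_addr + size).  E.g. (illustrative numbers)
 * host_addr = 0x1234, size = 0x2000: the in-page offset 0x234 plus
 * 0x2000 is 0x2234, which PAGE_ALIGN rounds up to 0x3000 -- three
 * whole pages for an unaligned buffer of two pages' worth of data.
 */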
2333static inline u64 aligned_size(u64 host_addr, size_t size)
2334{
2335 u64 addr;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002336 addr = (host_addr & (~PAGE_MASK)) + size;
2337 return PAGE_ALIGN(addr);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002338}
2339
2340struct iova *
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002341iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002342{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002343 struct iova *piova;
2344
2345 /* Make sure it's in range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002346 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002347 if (!size || (IOVA_START_ADDR + size > end))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002348 return NULL;
2349
2350 piova = alloc_iova(&domain->iovad,
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002351 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002352 return piova;
2353}
2354
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002355static struct iova *
2356__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002357 size_t size, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002358{
2359 struct pci_dev *pdev = to_pci_dev(dev);
2360 struct iova *iova = NULL;
2361
Yang Hongyang284901a2009-04-06 19:01:15 -07002362 if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002363 iova = iommu_alloc_iova(domain, size, dma_mask);
2364 else {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002365 /*
2366 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002367 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002368 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002369 */
Yang Hongyang284901a2009-04-06 19:01:15 -07002370 iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002371 if (!iova)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002372 iova = iommu_alloc_iova(domain, size, dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002373 }
2374
2375 if (!iova) {
2376		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
2377 return NULL;
2378 }
2379
2380 return iova;
2381}
2382
2383static struct dmar_domain *
2384get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002385{
2386 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002387 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002388
2389 domain = get_domain_for_dev(pdev,
2390 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2391 if (!domain) {
2392 printk(KERN_ERR
2393			"Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002394 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002395 }
2396
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002397 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002398 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002399 ret = domain_context_mapping(domain, pdev,
2400 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002401 if (ret) {
2402 printk(KERN_ERR
2403				"Domain context map for %s failed\n",
2404 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002405 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002406 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002407 }
2408
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002409 return domain;
2410}
2411
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002412static int iommu_dummy(struct pci_dev *pdev)
2413{
2414 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2415}
2416
2417/* Check if the pdev needs to go through non-identity map and unmap process.*/
2418static int iommu_no_mapping(struct pci_dev *pdev)
2419{
2420 int found;
2421
2422 if (!iommu_identity_mapping)
2423 return iommu_dummy(pdev);
2424
2425 found = identity_mapping(pdev);
2426 if (found) {
2427 if (pdev->dma_mask > DMA_BIT_MASK(32))
2428 return 1;
2429 else {
2430 /*
2431			 * The 32 bit DMA device is removed from si_domain and falls
2432			 * back to non-identity mapping.
2433 */
2434 domain_remove_one_dev_info(si_domain, pdev);
2435 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2436 pci_name(pdev));
2437 return 0;
2438 }
2439 } else {
2440 /*
2441		 * When a 64 bit DMA device is detached from a vm, the device
2442		 * is put into si_domain for identity mapping.
2443 */
2444 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2445 int ret;
2446 ret = domain_add_dev_info(si_domain, pdev);
2447 if (!ret) {
2448 printk(KERN_INFO "64bit %s uses identity mapping\n",
2449 pci_name(pdev));
2450 return 1;
2451 }
2452 }
2453 }
2454
2455 return iommu_dummy(pdev);
2456}
2457
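/*
 * __intel_map_single() is the core of the map path: pick (or create)
 * the device's domain, allocate an iova below the device's dma_mask,
 * derive the protection bits from the DMA direction (and the
 * zero-length-read capability), install the page mappings, and flush
 * the IOTLB only when the hardware runs in caching mode.
 */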
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002458static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2459 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002460{
2461 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002462 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002463 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002464 struct iova *iova;
2465 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002466 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002467 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002468
2469 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002470
2471 if (iommu_no_mapping(pdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002472 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002473
2474 domain = get_valid_domain_for_dev(pdev);
2475 if (!domain)
2476 return 0;
2477
Weidong Han8c11e792008-12-08 15:29:22 +08002478 iommu = domain_get_iommu(domain);
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002479 size = aligned_size((u64)paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002480
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002481 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002482 if (!iova)
2483 goto error;
2484
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002485 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002486
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002487 /*
2488 * Check if DMAR supports zero-length reads on write only
2489	 * mappings.
2490 */
2491 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002492 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002493 prot |= DMA_PTE_READ;
2494 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2495 prot |= DMA_PTE_WRITE;
2496 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002497	 * paddr .. (paddr + size) might span a partial page; we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002498	 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002499	 * might have two guest addresses mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002500 * is not a big problem
2501 */
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002502 ret = domain_page_mapping(domain, start_paddr,
David Woodhousefd18de52009-05-10 23:57:41 +01002503 ((u64)paddr) & PHYSICAL_PAGE_MASK,
2504 size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002505 if (ret)
2506 goto error;
2507
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002508 /* it's a non-present to present mapping. Only flush if caching mode */
2509 if (cap_caching_mode(iommu->cap))
2510 iommu_flush_iotlb_psi(iommu, 0, start_paddr,
2511 size >> VTD_PAGE_SHIFT);
2512 else
Weidong Han8c11e792008-12-08 15:29:22 +08002513 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002514
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002515 return start_paddr + ((u64)paddr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002516
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002517error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002518 if (iova)
2519 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002520	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002521 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002522 return 0;
2523}
2524
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002525static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2526 unsigned long offset, size_t size,
2527 enum dma_data_direction dir,
2528 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002529{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002530 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2531 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002532}
2533
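/*
 * Deferred unmap batching: instead of flushing the IOTLB on every
 * unmap, add_unmap() parks freed iovas in the per-iommu
 * deferred_flush tables; flush_unmaps() then performs one global
 * IOTLB flush per iommu and frees the batched iovas, either when
 * HIGH_WATER_MARK entries accumulate or when the 10ms unmap_timer
 * fires.
 */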
mark gross5e0d2a62008-03-04 15:22:08 -08002534static void flush_unmaps(void)
2535{
mark gross80b20dd2008-04-18 13:53:58 -07002536 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002537
mark gross5e0d2a62008-03-04 15:22:08 -08002538 timer_on = 0;
2539
2540 /* just flush them all */
2541 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002542 struct intel_iommu *iommu = g_iommus[i];
2543 if (!iommu)
2544 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002545
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002546 if (!deferred_flush[i].next)
2547 continue;
2548
2549 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002550 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002551 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002552 unsigned long mask;
2553 struct iova *iova = deferred_flush[i].iova[j];
2554
2555 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2556 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2557 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2558 iova->pfn_lo << PAGE_SHIFT, mask);
2559 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002560 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002561 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002562 }
2563
mark gross5e0d2a62008-03-04 15:22:08 -08002564 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002565}
2566
2567static void flush_unmaps_timeout(unsigned long data)
2568{
mark gross80b20dd2008-04-18 13:53:58 -07002569 unsigned long flags;
2570
2571 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002572 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002573 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002574}
2575
2576static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2577{
2578 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002579 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002580 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002581
2582 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002583 if (list_size == HIGH_WATER_MARK)
2584 flush_unmaps();
2585
Weidong Han8c11e792008-12-08 15:29:22 +08002586 iommu = domain_get_iommu(dom);
2587 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002588
mark gross80b20dd2008-04-18 13:53:58 -07002589 next = deferred_flush[iommu_id].next;
2590 deferred_flush[iommu_id].domain[next] = dom;
2591 deferred_flush[iommu_id].iova[next] = iova;
2592 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002593
2594 if (!timer_on) {
2595 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2596 timer_on = 1;
2597 }
2598 list_size++;
2599 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2600}
2601
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002602static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2603 size_t size, enum dma_data_direction dir,
2604 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002605{
2606 struct pci_dev *pdev = to_pci_dev(dev);
2607 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002608 unsigned long start_addr;
2609 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002610 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002611
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002612 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002613 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002614
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002615 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002616 BUG_ON(!domain);
2617
Weidong Han8c11e792008-12-08 15:29:22 +08002618 iommu = domain_get_iommu(domain);
2619
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002620 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2621 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002622 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002623
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002624 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002625 size = aligned_size((u64)dev_addr, size);
2626
David Woodhouse4cf2e752009-02-11 17:23:43 +00002627 pr_debug("Device %s unmapping: %zx@%llx\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002628 pci_name(pdev), size, (unsigned long long)start_addr);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002629
2630 /* clear the whole page */
2631 dma_pte_clear_range(domain, start_addr, start_addr + size);
2632 /* free page tables */
2633 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
mark gross5e0d2a62008-03-04 15:22:08 -08002634 if (intel_iommu_strict) {
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002635 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2636 size >> VTD_PAGE_SHIFT);
mark gross5e0d2a62008-03-04 15:22:08 -08002637 /* free iova */
2638 __free_iova(&domain->iovad, iova);
2639 } else {
2640 add_unmap(domain, iova);
2641 /*
2642		 * queue up the release of the unmap to save roughly 1/6th of the
2643		 * cpu time otherwise used up by the iotlb flush operation...
2644 */
mark gross5e0d2a62008-03-04 15:22:08 -08002645 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002646}
2647
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002648static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2649 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002650{
2651 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2652}
2653
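/*
 * Coherent allocations: intel_alloc_coherent() grabs zeroed pages and
 * maps them bidirectionally through __intel_map_single() against the
 * device's coherent_dma_mask; intel_free_coherent() undoes both steps.
 */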
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002654static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2655 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656{
2657 void *vaddr;
2658 int order;
2659
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002660 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002661 order = get_order(size);
2662 flags &= ~(GFP_DMA | GFP_DMA32);
2663
2664 vaddr = (void *)__get_free_pages(flags, order);
2665 if (!vaddr)
2666 return NULL;
2667 memset(vaddr, 0, size);
2668
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002669 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2670 DMA_BIDIRECTIONAL,
2671 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002672 if (*dma_handle)
2673 return vaddr;
2674 free_pages((unsigned long)vaddr, order);
2675 return NULL;
2676}
2677
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002678static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2679 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002680{
2681 int order;
2682
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002683 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002684 order = get_order(size);
2685
2686 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2687 free_pages((unsigned long)vaddr, order);
2688}
2689
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002690static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2691 int nelems, enum dma_data_direction dir,
2692 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002693{
2694 int i;
2695 struct pci_dev *pdev = to_pci_dev(hwdev);
2696 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002697 unsigned long start_addr;
2698 struct iova *iova;
2699 size_t size = 0;
David Woodhouse4cf2e752009-02-11 17:23:43 +00002700 phys_addr_t addr;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002701 struct scatterlist *sg;
Weidong Han8c11e792008-12-08 15:29:22 +08002702 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002703
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002704 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002705 return;
2706
2707 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002708 BUG_ON(!domain);
2709
2710 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002711
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002712 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002713 if (!iova)
2714 return;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002715 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002716 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002717 size += aligned_size((u64)addr, sg->length);
2718 }
2719
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002720 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002721
2722 /* clear the whole page */
2723 dma_pte_clear_range(domain, start_addr, start_addr + size);
2724 /* free page tables */
2725 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2726
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002727 iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2728 size >> VTD_PAGE_SHIFT);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002729
2730 /* free iova */
2731 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002732}
2733
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002734static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002735 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002736{
2737 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002738 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002739
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002740 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002741 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002742 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002743 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002744 }
2745 return nelems;
2746}
2747
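/*
 * intel_map_sg(): allocate one iova large enough for the aligned total
 * of all scatterlist entries, map each entry into it back to back, and
 * unwind the partial mapping and the iova if any step fails.
 */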
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002748static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2749 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002750{
David Woodhouse4cf2e752009-02-11 17:23:43 +00002751 phys_addr_t addr;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002752 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002753 struct pci_dev *pdev = to_pci_dev(hwdev);
2754 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002755 size_t size = 0;
2756 int prot = 0;
2757 size_t offset = 0;
2758 struct iova *iova = NULL;
2759 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002760 struct scatterlist *sg;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002761 unsigned long start_addr;
Weidong Han8c11e792008-12-08 15:29:22 +08002762 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002763
2764 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002765 if (iommu_no_mapping(pdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002766 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002767
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002768 domain = get_valid_domain_for_dev(pdev);
2769 if (!domain)
2770 return 0;
2771
Weidong Han8c11e792008-12-08 15:29:22 +08002772 iommu = domain_get_iommu(domain);
2773
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002774 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002775 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002776 size += aligned_size((u64)addr, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002777 }
2778
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002779 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002780 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002781 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002782 return 0;
2783 }
2784
2785 /*
2786 * Check if DMAR supports zero-length reads on write only
2787	 * mappings.
2788 */
2789 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002790 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002791 prot |= DMA_PTE_READ;
2792 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2793 prot |= DMA_PTE_WRITE;
2794
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002795 start_addr = iova->pfn_lo << PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002796 offset = 0;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002797 for_each_sg(sglist, sg, nelems, i) {
David Woodhouse4cf2e752009-02-11 17:23:43 +00002798 addr = page_to_phys(sg_page(sg)) + sg->offset;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002799 size = aligned_size((u64)addr, sg->length);
2800 ret = domain_page_mapping(domain, start_addr + offset,
David Woodhousefd18de52009-05-10 23:57:41 +01002801 ((u64)addr) & PHYSICAL_PAGE_MASK,
2802 size, prot);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002803 if (ret) {
2804 /* clear the page */
2805 dma_pte_clear_range(domain, start_addr,
2806 start_addr + offset);
2807 /* free page tables */
2808 dma_pte_free_pagetable(domain, start_addr,
2809 start_addr + offset);
2810 /* free iova */
2811 __free_iova(&domain->iovad, iova);
2812 return 0;
2813 }
2814 sg->dma_address = start_addr + offset +
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002815 ((u64)addr & (~PAGE_MASK));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002816 sg->dma_length = sg->length;
2817 offset += size;
2818 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002819
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002820 /* it's a non-present to present mapping. Only flush if caching mode */
2821 if (cap_caching_mode(iommu->cap))
2822 iommu_flush_iotlb_psi(iommu, 0, start_addr,
2823 offset >> VTD_PAGE_SHIFT);
2824 else
Weidong Han8c11e792008-12-08 15:29:22 +08002825 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002826
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002827 return nelems;
2828}
2829
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002830static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2831{
2832 return !dma_addr;
2833}
2834
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002835struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002836 .alloc_coherent = intel_alloc_coherent,
2837 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002838 .map_sg = intel_map_sg,
2839 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002840 .map_page = intel_map_page,
2841 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002842 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002843};
2844
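/*
 * Drivers do not call these ops directly; once intel_dma_ops is
 * installed as the platform's dma_map_ops they are reached through
 * the generic DMA API.  Illustrative (hypothetical) driver-side
 * sequence:
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, size, cpu, handle);
 */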
2845static inline int iommu_domain_cache_init(void)
2846{
2847 int ret = 0;
2848
2849 iommu_domain_cache = kmem_cache_create("iommu_domain",
2850 sizeof(struct dmar_domain),
2851 0,
2852					 SLAB_HWCACHE_ALIGN,
2853					 NULL);
2855 if (!iommu_domain_cache) {
2856 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2857 ret = -ENOMEM;
2858 }
2859
2860 return ret;
2861}
2862
2863static inline int iommu_devinfo_cache_init(void)
2864{
2865 int ret = 0;
2866
2867 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2868 sizeof(struct device_domain_info),
2869 0,
2870 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002871 NULL);
2872 if (!iommu_devinfo_cache) {
2873 printk(KERN_ERR "Couldn't create devinfo cache\n");
2874 ret = -ENOMEM;
2875 }
2876
2877 return ret;
2878}
2879
2880static inline int iommu_iova_cache_init(void)
2881{
2882 int ret = 0;
2883
2884 iommu_iova_cache = kmem_cache_create("iommu_iova",
2885 sizeof(struct iova),
2886 0,
2887 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002888 NULL);
2889 if (!iommu_iova_cache) {
2890 printk(KERN_ERR "Couldn't create iova cache\n");
2891 ret = -ENOMEM;
2892 }
2893
2894 return ret;
2895}
2896
2897static int __init iommu_init_mempool(void)
2898{
2899 int ret;
2900 ret = iommu_iova_cache_init();
2901 if (ret)
2902 return ret;
2903
2904 ret = iommu_domain_cache_init();
2905 if (ret)
2906 goto domain_error;
2907
2908 ret = iommu_devinfo_cache_init();
2909 if (!ret)
2910 return ret;
2911
2912 kmem_cache_destroy(iommu_domain_cache);
2913domain_error:
2914 kmem_cache_destroy(iommu_iova_cache);
2915
2916 return -ENOMEM;
2917}
2918
2919static void __init iommu_exit_mempool(void)
2920{
2921 kmem_cache_destroy(iommu_devinfo_cache);
2922 kmem_cache_destroy(iommu_domain_cache);
2923 kmem_cache_destroy(iommu_iova_cache);
2924
2925}
2926
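/*
 * Mark DMAR units that cover no PCI devices as ignored and, when
 * dmar_map_gfx is clear, bypass units that serve only graphics
 * devices by tagging their devices with DUMMY_DEVICE_DOMAIN_INFO.
 */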
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no PCI devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

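/*
 * Illustrative effect: on a box where one DMAR unit covers only the
 * integrated graphics device, booting without gfx mapping leaves that
 * unit ignored and stamps the device with DUMMY_DEVICE_DOMAIN_INFO, a
 * marker the DMA mapping paths later use to bypass translation for it.
 */
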
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

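/*
 * Ordering note for the suspend path below: iommu_flush_all() runs
 * while translation is still enabled; only afterwards does
 * iommu_suspend() call iommu_disable_translation(), so no stale
 * context-cache or IOTLB entries survive into the resumed image.
 */
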
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

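/*
 * Only the four fault-event programming registers need saving by
 * software: the root table, context entries and page tables live in
 * memory and are re-installed on the hardware by init_iommu_hw()
 * during resume.
 */
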
static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
			iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
			iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
			iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
			iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * The table and device-scope initialization above is also
	 * used by interrupt remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

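/*
 * Sketch of the resulting boot-time behaviour (assuming the usual x86
 * setup where intel_iommu_init() is invoked from the arch DMA init
 * path):
 *
 *	no_iommu / dmar_disabled / (swiotlb && !iommu_pass_through)
 *		-> -ENODEV, existing dma_ops left in place
 *	iommu_pass_through
 *		-> hardware pass-through, dma_ops left in place
 *	otherwise
 *		-> dma_ops = &intel_dma_ops, multi-level page tables
 */
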
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

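/*
 * Example topology (illustrative): for a device D behind a PCIe-to-PCI
 * bridge B, context entries were programmed for the intermediate
 * bridges as well as for D, so the detach above walks back up:
 *
 *	root port --- B (PCIe-to-PCI) --- P2P bridge --- D
 *
 * Every bridge between D and B is detached by its own bus/devfn, then
 * B itself: by (secondary bus, devfn 0) if it is PCIe-to-PCI, or by
 * its own bus/devfn if it is a legacy PCI bridge.
 */
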
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/*
		 * If there are no other devices under the same iommu owned
		 * by this domain, clear this iommu from iommu_bmp and update
		 * the iommu count and coherency.
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

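/*
 * Worked example (hypothetical values): a VM domain spanning one IOMMU
 * with agaw 1 (39-bit, 3-level table) and one with agaw 2 (48-bit,
 * 4-level table) must use agaw 1, otherwise the weaker unit could not
 * walk the domain's page table.
 */
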
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->mapping_lock);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

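/*
 * Example with the default width: for guest_width = 48,
 * guestwidth_to_adjustwidth() rounds to a page-table-aligned width and
 * width_to_agaw() yields agaw 2, i.e. a 4-level table rooted at
 * domain->pgd (assuming the 30 + 9*agaw width encoding used by this
 * driver).
 */
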
static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	u64 end;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);
	end = DOMAIN_MAX_ADDR(domain->gaw);
	end = end & (~VTD_PAGE_MASK);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, end);

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, end);

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

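/*
 * Teardown order above matters: device contexts go first, then the
 * iova allocator, then the page-table contents and the page tables
 * themselves, and only then the per-iommu domain-id bookkeeping and
 * the domain memory itself.
 */
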
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}

	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	dma_addr_t base;

	/* The address might not be aligned */
	base = iova & VTD_PAGE_MASK;
	size = VTD_PAGE_ALIGN(size);
	dma_pte_clear_range(dmar_domain, base, base + size);

	if (dmar_domain->max_addr == base + size)
		dmar_domain->max_addr = base;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = addr_to_dma_pte(dmar_domain, iova);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map_range,
	.unmap = intel_iommu_unmap_range,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

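/*
 * Illustrative consumer, modelled on KVM device assignment (not part
 * of this file): the generic API in <linux/iommu.h> dispatches to the
 * ops above, e.g. for a hypothetical assigned device "pdev":
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	int r = iommu_attach_device(dom, &pdev->dev);
 *	r = iommu_map_range(dom, gpa, hpa, PAGE_SIZE,
 *			    IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, gpa, PAGE_SIZE);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */
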
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
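
/*
 * The flag set here is consulted, together with the RWBF capability
 * bit, by iommu_flush_write_buffer() earlier in this file, so chipsets
 * that misreport the capability still get their write buffers flushed.
 */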