/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define DOMAIN_MAX_ADDR(gaw)    ((((u64)1) << gaw) - 1)
#define DOMAIN_MAX_PFN(gaw)     ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}

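/*
 * Illustrative sketch, not part of the original file: because VT-d pfns
 * are at least as fine-grained as MM pfns, mm -> dma -> mm is always the
 * identity, while dma -> mm truncates. Disabled example, assuming only
 * the helpers above:
 */
#if 0
static void dma_pfn_example(void)
{
        unsigned long mm_pfn = 0x1234;

        /* round trip through the finer-grained DMA pfn space */
        BUG_ON(dma_to_mm_pfn(mm_to_dma_pfn(mm_pfn)) != mm_pfn);

        /* e.g. with 64KiB MM pages, one MM pfn spans 16 4KiB DMA pfns */
        if (PAGE_SHIFT == 16)
                BUG_ON(mm_to_dma_pfn(1) != 16);
}
#endif
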
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return (struct context_entry *)
                (root_present(root) ?
                 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}

123
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000124/*
125 * low 64 bits:
126 * 0: present
127 * 1: fault processing disable
128 * 2-3: translation type
129 * 12-63: address space root
130 * high 64 bits:
131 * 0-2: address width
132 * 3-6: aval
133 * 8-23: domain id
134 */
135struct context_entry {
136 u64 lo;
137 u64 hi;
138};
Mark McLoughlin7a8fc252008-11-20 15:49:45 +0000139
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000140static inline bool context_present(struct context_entry *context)
141{
142 return (context->lo & 1);
143}
144static inline void context_set_present(struct context_entry *context)
145{
146 context->lo |= 1;
147}
148
149static inline void context_set_fault_enable(struct context_entry *context)
150{
151 context->lo &= (((u64)-1) << 2) | 1;
152}
153
Mark McLoughlinc07e7d22008-11-21 16:54:46 +0000154static inline void context_set_translation_type(struct context_entry *context,
155 unsigned long value)
156{
157 context->lo &= (((u64)-1) << 4) | 3;
158 context->lo |= (value & 3) << 2;
159}
160
161static inline void context_set_address_root(struct context_entry *context,
162 unsigned long value)
163{
164 context->lo |= value & VTD_PAGE_MASK;
165}
166
167static inline void context_set_address_width(struct context_entry *context,
168 unsigned long value)
169{
170 context->hi |= value & 7;
171}
172
173static inline void context_set_domain_id(struct context_entry *context,
174 unsigned long value)
175{
176 context->hi |= (value & ((1 << 16) - 1)) << 8;
177}
178
179static inline void context_clear_entry(struct context_entry *context)
180{
181 context->lo = 0;
182 context->hi = 0;
183}
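/*
 * Illustrative sketch, not part of the original file: composing a context
 * entry from scratch with the helpers above, roughly the order used by
 * domain_context_mapping_one() later in this file. The domain id and pgd
 * are hypothetical values.
 */
#if 0
static void context_entry_example(struct dma_pte *pgd)
{
        struct context_entry ce;

        context_clear_entry(&ce);
        context_set_domain_id(&ce, 42);         /* high qword bits 8-23 */
        context_set_address_width(&ce, 2);      /* agaw 2: 4-level, 48-bit */
        context_set_address_root(&ce, virt_to_phys(pgd));
        context_set_translation_type(&ce, CONTEXT_TT_MULTI_LEVEL);
        context_set_fault_enable(&ce);
        context_set_present(&ce);               /* make it live last */
}
#endif
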
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
        pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
        pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

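/*
 * Illustrative sketch, not part of the original file: building a leaf PTE
 * for one hypothetical DMA pfn with the accessors above.
 */
#if 0
static void dma_pte_example(void)
{
        struct dma_pte pte;
        unsigned long pfn = 0x12345;

        dma_clear_pte(&pte);
        dma_set_pte_pfn(&pte, pfn);
        dma_set_pte_readable(&pte);
        dma_set_pte_writable(&pte);

        /* present as soon as the read or write bit is set */
        BUG_ON(!dma_pte_present(&pte));
        BUG_ON(dma_pte_addr(&pte) != ((u64)pfn << VTD_PAGE_SHIFT));
}
#endif
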
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)

struct dmar_domain {
        int     id;                     /* domain id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct pci_dev *dev;    /* it's NULL for PCIE-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

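/*
 * Example (not from the original file): the parser above takes a
 * comma-separated option list on the kernel command line, e.g.
 *
 *      intel_iommu=on,igfx_off,strict
 *
 * which enables DMAR, skips the graphics device, and disables the
 * batched IOTLB flush.
 */
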
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

486
Weidong Han8e6040972008-12-08 15:49:06 +0800487static void domain_update_iommu_coherency(struct dmar_domain *domain)
488{
489 int i;
490
491 domain->iommu_coherency = 1;
492
493 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
494 for (; i < g_num_of_iommus; ) {
495 if (!ecap_coherent(g_iommus[i]->ecap)) {
496 domain->iommu_coherency = 0;
497 break;
498 }
499 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
500 }
501}
502
Sheng Yang58c610b2009-03-18 15:33:05 +0800503static void domain_update_iommu_snooping(struct dmar_domain *domain)
504{
505 int i;
506
507 domain->iommu_snooping = 1;
508
509 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
510 for (; i < g_num_of_iommus; ) {
511 if (!ecap_sc_support(g_iommus[i]->ecap)) {
512 domain->iommu_snooping = 0;
513 break;
514 }
515 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
516 }
517}
518
519/* Some capabilities may be different across iommus */
520static void domain_update_iommu_cap(struct dmar_domain *domain)
521{
522 domain_update_iommu_coherency(domain);
523 domain_update_iommu_snooping(domain);
524}
525
David Woodhouse276dbf992009-04-04 01:45:37 +0100526static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
Weidong Hanc7151a82008-12-08 22:51:37 +0800527{
528 struct dmar_drhd_unit *drhd = NULL;
529 int i;
530
531 for_each_drhd_unit(drhd) {
532 if (drhd->ignored)
533 continue;
David Woodhouse276dbf992009-04-04 01:45:37 +0100534 if (segment != drhd->segment)
535 continue;
Weidong Hanc7151a82008-12-08 22:51:37 +0800536
David Woodhouse924b6232009-04-04 00:39:25 +0100537 for (i = 0; i < drhd->devices_cnt; i++) {
Dirk Hohndel288e4872009-01-11 15:33:51 +0000538 if (drhd->devices[i] &&
539 drhd->devices[i]->bus->number == bus &&
Weidong Hanc7151a82008-12-08 22:51:37 +0800540 drhd->devices[i]->devfn == devfn)
541 return drhd->iommu;
David Woodhouse4958c5d2009-04-06 13:30:01 -0700542 if (drhd->devices[i] &&
543 drhd->devices[i]->subordinate &&
David Woodhouse924b6232009-04-04 00:39:25 +0100544 drhd->devices[i]->subordinate->number <= bus &&
545 drhd->devices[i]->subordinate->subordinate >= bus)
546 return drhd->iommu;
547 }
Weidong Hanc7151a82008-12-08 22:51:37 +0800548
549 if (drhd->include_all)
550 return drhd->iommu;
551 }
552
553 return NULL;
554}
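/*
 * Example usage, not from the original file: callers resolve a pci_dev to
 * its IOMMU by segment/bus/devfn; pdev here is a hypothetical device.
 */
#if 0
static struct intel_iommu *pdev_to_iommu(struct pci_dev *pdev)
{
        return device_to_iommu(pci_domain_nr(pdev->bus),
                               pdev->bus->number, pdev->devfn);
}
#endif
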
static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                context = (struct context_entry *)alloc_pgtable_page();
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                ret = 0;
                goto out;
        }
        ret = context_present(&context[devfn]);
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn],
                                    sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        struct root_entry *root;
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry)
                goto out;

        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
        return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

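/*
 * Illustrative sketch, not part of the original file: the arithmetic above
 * for the default 48-bit, 4-level case (agaw 2). Each level consumes
 * LEVEL_STRIDE (9) bits of DMA pfn, so a level 1 entry covers 4KiB and a
 * level 2 entry covers 2MiB of DMA address space.
 */
#if 0
static void page_level_example(void)
{
        unsigned long pfn = 0x12345678;

        BUG_ON(agaw_to_level(2) != 4);
        BUG_ON(agaw_to_width(2) != 48);
        BUG_ON(width_to_agaw(48) != 2);

        /* a level 2 entry spans 2^9 level 1 pages */
        BUG_ON(level_size(2) != 512);

        /* index of this pfn within its level 2 table */
        BUG_ON(pfn_level_offset(pfn, 2) != ((pfn >> 9) & LEVEL_MASK));

        /* round the pfn up to the next level 2 boundary */
        BUG_ON(align_to_level(pfn, 2) != ((pfn + 511) & ~511UL));
}
#endif
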
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);
        BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
        parent = domain->pgd;

        while (level > 0) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (level == 1)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page();

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        } else {
                                dma_pte_addr(pte);
                                domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte))
                        break;
                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *first_pte, *pte;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

        /* we don't need lock here; nobody else touches the iova range */
        while (start_pfn <= last_pfn) {
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, 2);
                        continue;
                }
                while (start_pfn <= last_pfn &&
                       (unsigned long)pte >> VTD_PAGE_SHIFT ==
                       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
                        dma_clear_pte(pte);
                        start_pfn++;
                        pte++;
                }
                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);
        }
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *first_pte, *pte;
        int total = agaw_to_level(domain->agaw);
        int level;
        unsigned long tmp;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

        /* We don't need lock here; nobody else touches the iova range */
        level = 2;
        while (level <= total) {
                tmp = align_to_level(start_pfn, level);

                /* If we can't even clear one PTE at this level, we're done */
                if (tmp + level_size(level) - 1 > last_pfn)
                        return;

                while (tmp + level_size(level) - 1 <= last_pfn) {
                        first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
                        if (!pte) {
                                tmp = align_to_level(tmp + 1, level + 1);
                                continue;
                        }
                        while (tmp + level_size(level) - 1 <= last_pfn &&
                               (unsigned long)pte >> VTD_PAGE_SHIFT ==
                               (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
                                free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
                                dma_clear_pte(pte);
                                pte++;
                                tmp += level_size(level);
                        }
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                }
                level++;
        }
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page();
        if (!root)
                return -ENOMEM;

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        void *addr;
        u32 sts;
        unsigned long flag;

        addr = iommu->root_entry;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* Note: always flush non-leaf currently */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably to be super secure.. Looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                      dmar_readq, (!(val & DMA_TLB_IVT)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                         (unsigned long long)DMA_TLB_IIRG(type),
                         (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
        struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
        int found = 0;
        unsigned long flags;
        struct device_domain_info *info;
        struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
                        found = 1;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev)
                return NULL;

        if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(info->dev))
                return NULL;

        info->iommu = iommu;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info)
                return;

        pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !pci_ats_enabled(info->dev))
                return;

        pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (!info->dev || !pci_ats_enabled(info->dev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(info->dev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        /*
         * Fall back to domain selective flush if no PSI support or the size
         * is too big.
         * PSI requires page size to be 2 ^ x, and the base address is
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, domain ID 0 is reserved for non-present to present
         * mapping flush. Device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || did)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

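/*
 * Illustrative sketch, not part of the original file: the PSI mask used
 * above is the log2 of the page count rounded up to a power of two, so a
 * 3-page flush is issued as a 4-page (mask 2) invalidation whose base
 * address must be aligned to that size.
 */
#if 0
static void psi_mask_example(void)
{
        BUG_ON(ilog2(__roundup_pow_of_two(1)) != 0);    /* 1 page  */
        BUG_ON(ilog2(__roundup_pow_of_two(3)) != 2);    /* 4 pages */
        BUG_ON(ilog2(__roundup_pow_of_two(8)) != 3);    /* 8 pages */
}
#endif
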
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
        return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("Number of Domains supported <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                printk(KERN_ERR "Allocating domain id array failed\n");
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                                 GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
                kfree(iommu->domain_ids);
                return -ENOMEM;
        }

        spin_lock_init(&iommu->lock);

        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;
        unsigned long flags;

        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
                clear_bit(i, iommu->domain_ids);

                spin_lock_irqsave(&domain->iommu_lock, flags);
                if (--domain->iommu_count == 0) {
                        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                vm_domain_exit(domain);
                        else
                                domain_exit(domain);
                }
                spin_unlock_irqrestore(&domain->iommu_lock, flags);

                i = find_next_bit(iommu->domain_ids,
                                  cap_ndoms(iommu->cap), i+1);
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

        if (iommu->irq) {
                set_irq_data(iommu->irq, NULL);
                /* This will mask the irq */
                free_irq(iommu->irq, iommu);
                destroy_irq(iommu->irq);
        }

        kfree(iommu->domains);
        kfree(iommu->domain_ids);

        g_iommus[iommu->seq_id] = NULL;

        /* if all iommus are freed, free g_iommus */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (g_iommus[i])
                        break;
        }

        if (i == g_num_of_iommus)
                kfree(g_iommus);

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;

        return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;
        unsigned long flags;

        ndomains = cap_ndoms(iommu->cap);

        spin_lock_irqsave(&iommu->lock, flags);

        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num >= ndomains) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                printk(KERN_ERR "IOMMU: no free domain ids\n");
                return -ENOMEM;
        }

        domain->id = num;
        set_bit(num, iommu->domain_ids);
        set_bit(iommu->seq_id, &domain->iommu_bmp);
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;
        int found = 0;

        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
        num = find_first_bit(iommu->domain_ids, ndomains);
        for (; num < ndomains; ) {
                if (iommu->domains[num] == domain) {
                        found = 1;
                        break;
                }
                num = find_next_bit(iommu->domain_ids,
                                    cap_ndoms(iommu->cap), num+1);
        }

        if (found) {
                clear_bit(num, iommu->domain_ids);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                iommu->domains[num] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
                          &reserved_alloc_key);
        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova)
                printk(KERN_ERR "Reserve IOAPIC range failed\n");

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova)
                                printk(KERN_ERR "Reserve iova failed\n");
                }
        }
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}

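/*
 * Illustrative sketch, not part of the original file: the adjusted width
 * rounds the guest width up so that (gaw - 12) is a multiple of the 9-bit
 * level stride, capped at 64 bits.
 */
#if 0
static void adjust_width_example(void)
{
        BUG_ON(guestwidth_to_adjustwidth(48) != 48);    /* already aligned */
        BUG_ON(guestwidth_to_adjustwidth(40) != 48);    /* rounded up */
        BUG_ON(guestwidth_to_adjustwidth(64) != 64);    /* capped at 64 */
}
#endif
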
static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        domain->iommu_count = 1;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        domain_remove_dev_info(domain);
        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        /* clear ptes */
        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        for_each_active_iommu(iommu, drhd)
                if (test_bit(iommu->seq_id, &domain->iommu_bmp))
                        iommu_detach_domain(domain, iommu);

        free_domain_mem(domain);
}

1439
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001440static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1441 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001442{
1443 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001445 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001446 struct dma_pte *pgd;
1447 unsigned long num;
1448 unsigned long ndomains;
1449 int id;
1450 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001451 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001452
1453 pr_debug("Set context mapping for %02x:%02x.%d\n",
1454 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001455
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001456 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001457 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1458 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001459
David Woodhouse276dbf992009-04-04 01:45:37 +01001460 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001461 if (!iommu)
1462 return -ENODEV;
1463
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001464 context = device_to_context_entry(iommu, bus, devfn);
1465 if (!context)
1466 return -ENOMEM;
1467 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001468 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001469 spin_unlock_irqrestore(&iommu->lock, flags);
1470 return 0;
1471 }
1472
Weidong Hanea6606b2008-12-08 23:08:15 +08001473 id = domain->id;
1474 pgd = domain->pgd;
1475
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001476 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1477 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001478 int found = 0;
1479
1480 /* find an available domain id for this device in iommu */
1481 ndomains = cap_ndoms(iommu->cap);
1482 num = find_first_bit(iommu->domain_ids, ndomains);
1483 for (; num < ndomains; ) {
1484 if (iommu->domains[num] == domain) {
1485 id = num;
1486 found = 1;
1487 break;
1488 }
1489 num = find_next_bit(iommu->domain_ids,
1490 cap_ndoms(iommu->cap), num+1);
1491 }
1492
1493 if (found == 0) {
1494 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1495 if (num >= ndomains) {
1496 spin_unlock_irqrestore(&iommu->lock, flags);
1497 printk(KERN_ERR "IOMMU: no free domain ids\n");
1498 return -EFAULT;
1499 }
1500
1501 set_bit(num, iommu->domain_ids);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001502 set_bit(iommu->seq_id, &domain->iommu_bmp);
Weidong Hanea6606b2008-12-08 23:08:15 +08001503 iommu->domains[num] = domain;
1504 id = num;
1505 }
1506
1507 		/* Skip top levels of the page table for
1508 		 * IOMMUs whose agaw is smaller than the domain's default.
1509 		 */
1510 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1511 pgd = phys_to_virt(dma_pte_addr(pgd));
1512 if (!dma_pte_present(pgd)) {
1513 spin_unlock_irqrestore(&iommu->lock, flags);
1514 return -ENOMEM;
1515 }
1516 }
1517 }
1518
1519 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001520
Yu Zhao93a23a72009-05-18 13:51:37 +08001521 if (translation != CONTEXT_TT_PASS_THROUGH) {
1522 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1523 translation = info ? CONTEXT_TT_DEV_IOTLB :
1524 CONTEXT_TT_MULTI_LEVEL;
1525 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001526 /*
1527 * In pass through mode, AW must be programmed to indicate the largest
1528 * AGAW value supported by hardware. And ASR is ignored by hardware.
1529 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001530 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001531 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001532 else {
1533 context_set_address_root(context, virt_to_phys(pgd));
1534 context_set_address_width(context, iommu->agaw);
1535 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001536
1537 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001538 context_set_fault_enable(context);
1539 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001540 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001541
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001542 /*
1543 * It's a non-present to present mapping. If hardware doesn't cache
1544 	 * non-present entries we only need to flush the write-buffer. If it
1545 	 * _does_ cache non-present entries, then it does so in the special
1546 * domain #0, which we have to flush:
1547 */
1548 if (cap_caching_mode(iommu->cap)) {
1549 iommu->flush.flush_context(iommu, 0,
1550 (((u16)bus) << 8) | devfn,
1551 DMA_CCMD_MASK_NOBIT,
1552 DMA_CCMD_DEVICE_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001553 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001554 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001555 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001556 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001557 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001559
1560 spin_lock_irqsave(&domain->iommu_lock, flags);
1561 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1562 domain->iommu_count++;
Sheng Yang58c610b2009-03-18 15:33:05 +08001563 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001564 }
1565 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001566 return 0;
1567}
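/*
 * For illustration (a sketch, not normative): after a successful
 * domain_context_mapping_one() the context entry at (bus, devfn)
 * holds roughly:
 *
 *   domain id  = id                    (per-iommu domain number)
 *   ASR        = virt_to_phys(pgd)     (ignored for pass-through)
 *   AW         = domain->agaw          (iommu->msagaw for pass-through)
 *   TT         = CONTEXT_TT_MULTI_LEVEL, _DEV_IOTLB or _PASS_THROUGH
 *   fault processing enabled, present bit set
 */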
1568
1569static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001570domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1571 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001572{
1573 int ret;
1574 struct pci_dev *tmp, *parent;
1575
David Woodhouse276dbf992009-04-04 01:45:37 +01001576 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001577 pdev->bus->number, pdev->devfn,
1578 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001579 if (ret)
1580 return ret;
1581
1582 /* dependent device mapping */
1583 tmp = pci_find_upstream_pcie_bridge(pdev);
1584 if (!tmp)
1585 return 0;
1586 /* Secondary interface's bus number and devfn 0 */
1587 parent = pdev->bus->self;
1588 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001589 ret = domain_context_mapping_one(domain,
1590 pci_domain_nr(parent->bus),
1591 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001592 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001593 if (ret)
1594 return ret;
1595 parent = parent->bus->self;
1596 }
1597 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1598 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001599 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001600 tmp->subordinate->number, 0,
1601 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001602 else /* this is a legacy PCI bridge */
1603 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001604 pci_domain_nr(tmp->bus),
1605 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001606 tmp->devfn,
1607 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001608}
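/*
 * Example of the dependent-device walk above (hypothetical topology):
 * device 06:05.0 sits on a conventional PCI bus behind a PCIe-to-PCI
 * bridge whose secondary bus is 6.  DMA from 06:05.0 may arrive tagged
 * with requester ID (bus 6, devfn 0), so context entries are set for
 * 06:05.0 itself, every bridge between it and the PCIe bridge, and
 * finally (6, 0) -- or the bridge's own (bus, devfn) when the upstream
 * bridge is a legacy PCI bridge that forwards its own requester ID.
 */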
1609
Weidong Han5331fe62008-12-08 23:00:00 +08001610static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001611{
1612 int ret;
1613 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001614 struct intel_iommu *iommu;
1615
David Woodhouse276dbf992009-04-04 01:45:37 +01001616 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1617 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001618 if (!iommu)
1619 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001620
David Woodhouse276dbf992009-04-04 01:45:37 +01001621 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001622 if (!ret)
1623 return ret;
1624 /* dependent device mapping */
1625 tmp = pci_find_upstream_pcie_bridge(pdev);
1626 if (!tmp)
1627 return ret;
1628 /* Secondary interface's bus number and devfn 0 */
1629 parent = pdev->bus->self;
1630 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001631 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001632 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001633 if (!ret)
1634 return ret;
1635 parent = parent->bus->self;
1636 }
1637 if (tmp->is_pcie)
David Woodhouse276dbf992009-04-04 01:45:37 +01001638 return device_context_mapped(iommu, tmp->subordinate->number,
1639 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001640 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001641 return device_context_mapped(iommu, tmp->bus->number,
1642 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001643}
1644
David Woodhouse9051aa02009-06-29 12:30:54 +01001645static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1646 struct scatterlist *sg, unsigned long phys_pfn,
1647 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001648{
1649 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001650 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001651 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001652 unsigned long sg_res;
David Woodhousee1605492009-06-29 11:17:38 +01001653
1654 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1655
1656 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1657 return -EINVAL;
1658
1659 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1660
David Woodhouse9051aa02009-06-29 12:30:54 +01001661 if (sg)
1662 sg_res = 0;
1663 else {
1664 sg_res = nr_pages + 1;
1665 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1666 }
1667
David Woodhousee1605492009-06-29 11:17:38 +01001668 while (nr_pages--) {
David Woodhousec85994e2009-07-01 19:21:24 +01001669 uint64_t tmp;
1670
David Woodhousee1605492009-06-29 11:17:38 +01001671 if (!sg_res) {
1672 sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
1673 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1674 sg->dma_length = sg->length;
1675 pteval = page_to_phys(sg_page(sg)) | prot;
1676 }
1677 if (!pte) {
1678 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1679 if (!pte)
1680 return -ENOMEM;
1681 }
1682 		/* We don't need a lock here; nobody else
1683 		 * touches the iova range
1684 */
David Woodhousec85994e2009-07-01 19:21:24 +01001685 tmp = cmpxchg64(&pte->val, 0ULL, pteval);
1686 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001687 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001688 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1689 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001690 if (dumps) {
1691 dumps--;
1692 debug_dma_dump_mappings(NULL);
1693 }
1694 WARN_ON(1);
1695 }
David Woodhousee1605492009-06-29 11:17:38 +01001696 pte++;
1697 if (!nr_pages ||
1698 (unsigned long)pte >> VTD_PAGE_SHIFT !=
1699 (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
1700 domain_flush_cache(domain, first_pte,
1701 (void *)pte - (void *)first_pte);
1702 pte = NULL;
1703 }
1704 iov_pfn++;
1705 pteval += VTD_PAGE_SIZE;
1706 sg_res--;
1707 if (!sg_res)
1708 sg = sg_next(sg);
1709 }
1710 return 0;
1711}
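/*
 * Worked example of the PTE loop above (a sketch): a call such as
 *
 *   domain_pfn_mapping(domain, 0x1000, 0x5000, 2, DMA_PTE_READ|DMA_PTE_WRITE)
 *
 * takes the !sg path, so sg_res = 3 (never exhausted within 2 pages)
 * and pteval = (0x5000 << VTD_PAGE_SHIFT) | prot.  The first iteration
 * installs that value at the PTE for IOVA pfn 0x1000 via cmpxchg64()
 * (catching any mapping that is unexpectedly already present); then
 * pte++, iov_pfn = 0x1001 and pteval += VTD_PAGE_SIZE map the next
 * page.  Cache flushing is batched: domain_flush_cache() runs once per
 * PTE page touched, not once per entry.
 */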
1712
David Woodhouse9051aa02009-06-29 12:30:54 +01001713static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1714 struct scatterlist *sg, unsigned long nr_pages,
1715 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001716{
David Woodhouse9051aa02009-06-29 12:30:54 +01001717 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1718}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001719
David Woodhouse9051aa02009-06-29 12:30:54 +01001720static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1721 unsigned long phys_pfn, unsigned long nr_pages,
1722 int prot)
1723{
1724 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001725}
1726
Weidong Hanc7151a82008-12-08 22:51:37 +08001727static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001728{
Weidong Hanc7151a82008-12-08 22:51:37 +08001729 if (!iommu)
1730 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001731
1732 clear_context_table(iommu, bus, devfn);
1733 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001734 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001735 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001736}
1737
1738static void domain_remove_dev_info(struct dmar_domain *domain)
1739{
1740 struct device_domain_info *info;
1741 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001742 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001743
1744 spin_lock_irqsave(&device_domain_lock, flags);
1745 while (!list_empty(&domain->devices)) {
1746 info = list_entry(domain->devices.next,
1747 struct device_domain_info, link);
1748 list_del(&info->link);
1749 list_del(&info->global);
1750 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001751 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001752 spin_unlock_irqrestore(&device_domain_lock, flags);
1753
Yu Zhao93a23a72009-05-18 13:51:37 +08001754 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001755 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001756 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001757 free_devinfo_mem(info);
1758
1759 spin_lock_irqsave(&device_domain_lock, flags);
1760 }
1761 spin_unlock_irqrestore(&device_domain_lock, flags);
1762}
1763
1764/*
1765 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001766 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001767 */
Kay, Allen M38717942008-09-09 18:37:29 +03001768static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001769find_domain(struct pci_dev *pdev)
1770{
1771 struct device_domain_info *info;
1772
1773 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001774 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001775 if (info)
1776 return info->domain;
1777 return NULL;
1778}
1779
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001780/* domain is initialized */
1781static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1782{
1783 struct dmar_domain *domain, *found = NULL;
1784 struct intel_iommu *iommu;
1785 struct dmar_drhd_unit *drhd;
1786 struct device_domain_info *info, *tmp;
1787 struct pci_dev *dev_tmp;
1788 unsigned long flags;
1789 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001790 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001791 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792
1793 domain = find_domain(pdev);
1794 if (domain)
1795 return domain;
1796
David Woodhouse276dbf992009-04-04 01:45:37 +01001797 segment = pci_domain_nr(pdev->bus);
1798
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001799 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1800 if (dev_tmp) {
1801 if (dev_tmp->is_pcie) {
1802 bus = dev_tmp->subordinate->number;
1803 devfn = 0;
1804 } else {
1805 bus = dev_tmp->bus->number;
1806 devfn = dev_tmp->devfn;
1807 }
1808 spin_lock_irqsave(&device_domain_lock, flags);
1809 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001810 if (info->segment == segment &&
1811 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001812 found = info->domain;
1813 break;
1814 }
1815 }
1816 spin_unlock_irqrestore(&device_domain_lock, flags);
1817 		/* pcie-pci bridge already has a domain, use it */
1818 if (found) {
1819 domain = found;
1820 goto found_domain;
1821 }
1822 }
1823
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001824 domain = alloc_domain();
1825 if (!domain)
1826 goto error;
1827
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001828 /* Allocate new domain for the device */
1829 drhd = dmar_find_matched_drhd_unit(pdev);
1830 if (!drhd) {
1831 		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1832 			pci_name(pdev));
 		free_domain_mem(domain); /* don't leak the domain allocated above */
1833 		return NULL;
1834 }
1835 iommu = drhd->iommu;
1836
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001837 ret = iommu_attach_domain(domain, iommu);
1838 if (ret) {
1839 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001840 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001841 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001842
1843 if (domain_init(domain, gaw)) {
1844 domain_exit(domain);
1845 goto error;
1846 }
1847
1848 /* register pcie-to-pci device */
1849 if (dev_tmp) {
1850 info = alloc_devinfo_mem();
1851 if (!info) {
1852 domain_exit(domain);
1853 goto error;
1854 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001855 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001856 info->bus = bus;
1857 info->devfn = devfn;
1858 info->dev = NULL;
1859 info->domain = domain;
1860 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001861 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001862
1863 		/* pcie-to-pci bridge already has a domain, use it */
1864 found = NULL;
1865 spin_lock_irqsave(&device_domain_lock, flags);
1866 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001867 if (tmp->segment == segment &&
1868 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001869 found = tmp->domain;
1870 break;
1871 }
1872 }
1873 if (found) {
1874 free_devinfo_mem(info);
1875 domain_exit(domain);
1876 domain = found;
1877 } else {
1878 list_add(&info->link, &domain->devices);
1879 list_add(&info->global, &device_domain_list);
1880 }
1881 spin_unlock_irqrestore(&device_domain_lock, flags);
1882 }
1883
1884found_domain:
1885 info = alloc_devinfo_mem();
1886 if (!info)
1887 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001888 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001889 info->bus = pdev->bus->number;
1890 info->devfn = pdev->devfn;
1891 info->dev = pdev;
1892 info->domain = domain;
1893 spin_lock_irqsave(&device_domain_lock, flags);
1894 	/* somebody else raced us and set it already */
1895 found = find_domain(pdev);
1896 if (found != NULL) {
1897 spin_unlock_irqrestore(&device_domain_lock, flags);
1898 if (found != domain) {
1899 domain_exit(domain);
1900 domain = found;
1901 }
1902 free_devinfo_mem(info);
1903 return domain;
1904 }
1905 list_add(&info->link, &domain->devices);
1906 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001907 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001908 spin_unlock_irqrestore(&device_domain_lock, flags);
1909 return domain;
1910error:
1911 /* recheck it here, maybe others set it */
1912 return find_domain(pdev);
1913}
1914
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001915static int iommu_identity_mapping;
1916
David Woodhouseb2132032009-06-26 18:50:28 +01001917static int iommu_domain_identity_map(struct dmar_domain *domain,
1918 unsigned long long start,
1919 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001920{
David Woodhousec5395d52009-06-28 16:35:56 +01001921 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1922 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001923
David Woodhousec5395d52009-06-28 16:35:56 +01001924 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1925 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001926 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01001927 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001928 }
1929
David Woodhousec5395d52009-06-28 16:35:56 +01001930 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1931 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001932 /*
1933 	 * RMRR range might overlap with the physical memory range;
1934 * clear it first
1935 */
David Woodhousec5395d52009-06-28 16:35:56 +01001936 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001937
David Woodhousec5395d52009-06-28 16:35:56 +01001938 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1939 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01001940 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01001941}
1942
1943static int iommu_prepare_identity_map(struct pci_dev *pdev,
1944 unsigned long long start,
1945 unsigned long long end)
1946{
1947 struct dmar_domain *domain;
1948 int ret;
1949
1950 printk(KERN_INFO
1951 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1952 pci_name(pdev), start, end);
1953
David Woodhousec7ab48d2009-06-26 19:10:36 +01001954 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01001955 if (!domain)
1956 return -ENOMEM;
1957
1958 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001959 if (ret)
1960 goto error;
1961
1962 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001963 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01001964 if (ret)
1965 goto error;
1966
1967 return 0;
1968
1969 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001970 domain_exit(domain);
1971 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001972}
1973
1974static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1975 struct pci_dev *pdev)
1976{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001977 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001978 return 0;
1979 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1980 rmrr->end_address + 1);
1981}
1982
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001983#ifdef CONFIG_DMAR_FLOPPY_WA
1984static inline void iommu_prepare_isa(void)
1985{
1986 struct pci_dev *pdev;
1987 int ret;
1988
1989 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1990 if (!pdev)
1991 return;
1992
David Woodhousec7ab48d2009-06-26 19:10:36 +01001993 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001994 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1995
1996 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01001997 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
1998 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07001999
2000}
2001#else
2002static inline void iommu_prepare_isa(void)
2003{
2004 return;
2005}
2006#endif /* !CONFIG_DMAR_FLOPPY_WA */
2007
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002008/* Initialize each context entry as pass through. */
2009static int __init init_context_pass_through(void)
2010{
2011 struct pci_dev *pdev = NULL;
2012 struct dmar_domain *domain;
2013 int ret;
2014
2015 for_each_pci_dev(pdev) {
2016 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2017 ret = domain_context_mapping(domain, pdev,
2018 CONTEXT_TT_PASS_THROUGH);
2019 if (ret)
2020 return ret;
2021 }
2022 return 0;
2023}
2024
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002025static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002026
2027static int __init si_domain_work_fn(unsigned long start_pfn,
2028 unsigned long end_pfn, void *datax)
2029{
2030 int *ret = datax;
2031
2032 *ret = iommu_domain_identity_map(si_domain,
2033 (uint64_t)start_pfn << PAGE_SHIFT,
2034 (uint64_t)end_pfn << PAGE_SHIFT);
2035 return *ret;
2037}
2038
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002039static int si_domain_init(void)
2040{
2041 struct dmar_drhd_unit *drhd;
2042 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002043 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002044
2045 si_domain = alloc_domain();
2046 if (!si_domain)
2047 return -EFAULT;
2048
David Woodhousec7ab48d2009-06-26 19:10:36 +01002049 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002050
2051 for_each_active_iommu(iommu, drhd) {
2052 ret = iommu_attach_domain(si_domain, iommu);
2053 if (ret) {
2054 domain_exit(si_domain);
2055 return -EFAULT;
2056 }
2057 }
2058
2059 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2060 domain_exit(si_domain);
2061 return -EFAULT;
2062 }
2063
2064 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2065
David Woodhousec7ab48d2009-06-26 19:10:36 +01002066 for_each_online_node(nid) {
2067 work_with_active_regions(nid, si_domain_work_fn, &ret);
2068 if (ret)
2069 return ret;
2070 }
2071
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002072 return 0;
2073}
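/*
 * Illustration (hypothetical memory map): with active RAM ranges
 * 0x00000000-0x0009ffff and 0x00100000-0x7fffffff,
 * work_with_active_regions() invokes si_domain_work_fn() once per
 * range and installs 1:1 mappings, so any bus address a device uses
 * inside those ranges translates to the identical physical address.
 */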
2074
2075static void domain_remove_one_dev_info(struct dmar_domain *domain,
2076 struct pci_dev *pdev);
2077static int identity_mapping(struct pci_dev *pdev)
2078{
2079 struct device_domain_info *info;
2080
2081 if (likely(!iommu_identity_mapping))
2082 return 0;
2083
2085 list_for_each_entry(info, &si_domain->devices, link)
2086 if (info->dev == pdev)
2087 return 1;
2088 return 0;
2089}
2090
2091static int domain_add_dev_info(struct dmar_domain *domain,
2092 struct pci_dev *pdev)
2093{
2094 struct device_domain_info *info;
2095 unsigned long flags;
2096
2097 info = alloc_devinfo_mem();
2098 if (!info)
2099 return -ENOMEM;
2100
2101 info->segment = pci_domain_nr(pdev->bus);
2102 info->bus = pdev->bus->number;
2103 info->devfn = pdev->devfn;
2104 info->dev = pdev;
2105 info->domain = domain;
2106
2107 spin_lock_irqsave(&device_domain_lock, flags);
2108 list_add(&info->link, &domain->devices);
2109 list_add(&info->global, &device_domain_list);
2110 pdev->dev.archdata.iommu = info;
2111 spin_unlock_irqrestore(&device_domain_lock, flags);
2112
2113 return 0;
2114}
2115
2116static int iommu_prepare_static_identity_mapping(void)
2117{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002118 struct pci_dev *pdev = NULL;
2119 int ret;
2120
2121 ret = si_domain_init();
2122 if (ret)
2123 return -EFAULT;
2124
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002125 for_each_pci_dev(pdev) {
David Woodhousec7ab48d2009-06-26 19:10:36 +01002126 printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
2127 pci_name(pdev));
2128
2129 ret = domain_context_mapping(si_domain, pdev,
2130 CONTEXT_TT_MULTI_LEVEL);
2131 if (ret)
2132 return ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002133 ret = domain_add_dev_info(si_domain, pdev);
2134 if (ret)
2135 return ret;
2136 }
2137
2138 return 0;
2139}
2140
2141int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002142{
2143 struct dmar_drhd_unit *drhd;
2144 struct dmar_rmrr_unit *rmrr;
2145 struct pci_dev *pdev;
2146 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002147 int i, ret;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002148 int pass_through = 1;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002149
2150 /*
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002151 	 * If pass through cannot be enabled, the iommu falls back to identity
2152 * mapping.
2153 */
2154 if (iommu_pass_through)
2155 iommu_identity_mapping = 1;
2156
2157 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002158 * for each drhd
2159 * allocate root
2160 * initialize and program root entry to not present
2161 * endfor
2162 */
2163 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002164 g_num_of_iommus++;
2165 /*
2166 * lock not needed as this is only incremented in the single
2167 		 * threaded kernel __init code path; all other accesses are
2168 		 * read-only
2169 */
2170 }
2171
Weidong Hand9630fe2008-12-08 11:06:32 +08002172 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2173 GFP_KERNEL);
2174 if (!g_iommus) {
2175 printk(KERN_ERR "Allocating global iommu array failed\n");
2176 ret = -ENOMEM;
2177 goto error;
2178 }
2179
mark gross80b20dd2008-04-18 13:53:58 -07002180 deferred_flush = kzalloc(g_num_of_iommus *
2181 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2182 if (!deferred_flush) {
Weidong Hand9630fe2008-12-08 11:06:32 +08002183 kfree(g_iommus);
mark gross5e0d2a62008-03-04 15:22:08 -08002184 ret = -ENOMEM;
2185 goto error;
2186 }
2187
mark gross5e0d2a62008-03-04 15:22:08 -08002188 for_each_drhd_unit(drhd) {
2189 if (drhd->ignored)
2190 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002191
2192 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002193 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002194
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002195 ret = iommu_init_domains(iommu);
2196 if (ret)
2197 goto error;
2198
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002199 /*
2200 * TBD:
2201 * we could share the same root & context tables
2202 		 * among all IOMMUs. Need to split it later.
2203 */
2204 ret = iommu_alloc_root_entry(iommu);
2205 if (ret) {
2206 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2207 goto error;
2208 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002209 if (!ecap_pass_through(iommu->ecap))
2210 pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002211 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002212 if (iommu_pass_through)
2213 if (!pass_through) {
2214 printk(KERN_INFO
2215 "Pass Through is not supported by hardware.\n");
2216 iommu_pass_through = 0;
2217 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002218
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002219 /*
2220 * Start from the sane iommu hardware state.
2221 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002222 for_each_drhd_unit(drhd) {
2223 if (drhd->ignored)
2224 continue;
2225
2226 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002227
2228 /*
2229 * If the queued invalidation is already initialized by us
2230 * (for example, while enabling interrupt-remapping) then
2231 * we got the things already rolling from a sane state.
2232 */
2233 if (iommu->qi)
2234 continue;
2235
2236 /*
2237 * Clear any previous faults.
2238 */
2239 dmar_fault(-1, iommu);
2240 /*
2241 * Disable queued invalidation if supported and already enabled
2242 * before OS handover.
2243 */
2244 dmar_disable_qi(iommu);
2245 }
2246
2247 for_each_drhd_unit(drhd) {
2248 if (drhd->ignored)
2249 continue;
2250
2251 iommu = drhd->iommu;
2252
Youquan Songa77b67d2008-10-16 16:31:56 -07002253 if (dmar_enable_qi(iommu)) {
2254 /*
2255 * Queued Invalidate not enabled, use Register Based
2256 * Invalidate
2257 */
2258 iommu->flush.flush_context = __iommu_flush_context;
2259 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2260 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002261 "invalidation\n",
2262 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002263 } else {
2264 iommu->flush.flush_context = qi_flush_context;
2265 iommu->flush.flush_iotlb = qi_flush_iotlb;
2266 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002267 "invalidation\n",
2268 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002269 }
2270 }
2271
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002272 /*
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002273 * If pass through is set and enabled, context entries of all pci
2274 	 * devices are initialized with the pass-through translation type.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002275 */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002276 if (iommu_pass_through) {
2277 ret = init_context_pass_through();
2278 if (ret) {
2279 printk(KERN_ERR "IOMMU: Pass through init failed.\n");
2280 iommu_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002281 }
2282 }
2283
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002284 /*
2285 	 * If pass through is not set or not enabled, set up context entries
2286 	 * with identity mappings for rmrr, gfx and isa devices, and possibly
2287 	 * fall back to static identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002288 */
2289 if (!iommu_pass_through) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002290 if (iommu_identity_mapping)
2291 iommu_prepare_static_identity_mapping();
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002292 /*
2293 * For each rmrr
2294 * for each dev attached to rmrr
2295 * do
2296 * locate drhd for dev, alloc domain for dev
2297 * allocate free domain
2298 * allocate page table entries for rmrr
2299 * if context not allocated for bus
2300 * allocate and init context
2301 * set present in root table for this bus
2302 * init context with domain, translation etc
2303 * endfor
2304 * endfor
2305 */
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002306 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002307 for_each_rmrr_units(rmrr) {
2308 for (i = 0; i < rmrr->devices_cnt; i++) {
2309 pdev = rmrr->devices[i];
2310 /*
2311 				 * some BIOSes list non-existent devices in the DMAR
2312 * table.
2313 */
2314 if (!pdev)
2315 continue;
2316 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2317 if (ret)
2318 printk(KERN_ERR
2319 "IOMMU: mapping reserved region failed\n");
2320 }
2321 }
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07002322
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002323 iommu_prepare_isa();
2324 }
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002325
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002326 /*
2327 * for each drhd
2328 * enable fault log
2329 * global invalidate context cache
2330 * global invalidate iotlb
2331 * enable translation
2332 */
2333 for_each_drhd_unit(drhd) {
2334 if (drhd->ignored)
2335 continue;
2336 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002337
2338 iommu_flush_write_buffer(iommu);
2339
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002340 ret = dmar_set_interrupt(iommu);
2341 if (ret)
2342 goto error;
2343
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002344 iommu_set_root_entry(iommu);
2345
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002346 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002347 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002348 iommu_disable_protect_mem_regions(iommu);
2349
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002350 ret = iommu_enable_translation(iommu);
2351 if (ret)
2352 goto error;
2353 }
2354
2355 return 0;
2356error:
2357 for_each_drhd_unit(drhd) {
2358 if (drhd->ignored)
2359 continue;
2360 iommu = drhd->iommu;
2361 free_iommu(iommu);
2362 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002363 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002364 return ret;
2365}
2366
David Woodhouse88cb6a72009-06-28 15:03:06 +01002367static inline unsigned long aligned_nrpages(unsigned long host_addr,
2368 size_t size)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002369{
David Woodhouse88cb6a72009-06-28 15:03:06 +01002370 host_addr &= ~PAGE_MASK;
2371 host_addr += size + PAGE_SIZE - 1;
2372
2373 return host_addr >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002374}
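/*
 * Worked example: aligned_nrpages(0x1003, 0x1ffe) keeps only the page
 * offset (0x003), adds size plus PAGE_SIZE - 1 (0x003 + 0x1ffe + 0xfff
 * = 0x3000) and shifts by VTD_PAGE_SHIFT, giving 3 -- the buffer
 * touches host pages 0x1, 0x2 and 0x3 (assuming 4KiB pages).
 */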
2375
David Woodhouse875764d2009-06-28 21:20:51 +01002376static struct iova *intel_alloc_iova(struct device *dev,
2377 struct dmar_domain *domain,
2378 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002379{
2380 struct pci_dev *pdev = to_pci_dev(dev);
2381 struct iova *iova = NULL;
2382
David Woodhouse875764d2009-06-28 21:20:51 +01002383 /* Restrict dma_mask to the width that the iommu can handle */
2384 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2385
2386 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002387 /*
2388 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002389 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002390 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002391 */
David Woodhouse875764d2009-06-28 21:20:51 +01002392 iova = alloc_iova(&domain->iovad, nrpages,
2393 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2394 if (iova)
2395 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002396 }
David Woodhouse875764d2009-06-28 21:20:51 +01002397 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2398 if (unlikely(!iova)) {
2399 		printk(KERN_ERR "Allocating %lu-page iova for %s failed\n",
2400 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002401 return NULL;
2402 }
2403
2404 return iova;
2405}
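/*
 * Allocation policy sketched: for a device advertising a 64-bit
 * dma_mask, the first alloc_iova() still tries to stay below 4GiB,
 * since some devices and chipsets handle dual-address-cycle (>32-bit)
 * addressing poorly, and only falls back to the full mask when the
 * low range is exhausted.  Booting with intel_iommu=forcedac
 * (dmar_forcedac) skips the low attempt entirely.
 */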
2406
2407static struct dmar_domain *
2408get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002409{
2410 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002412
2413 domain = get_domain_for_dev(pdev,
2414 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2415 if (!domain) {
2416 printk(KERN_ERR
2417 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002418 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002419 }
2420
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002421 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002422 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002423 ret = domain_context_mapping(domain, pdev,
2424 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002425 if (ret) {
2426 printk(KERN_ERR
2427 "Domain context map for %s failed",
2428 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002429 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002430 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002431 }
2432
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002433 return domain;
2434}
2435
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002436static int iommu_dummy(struct pci_dev *pdev)
2437{
2438 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2439}
2440
2441/* Check if the pdev needs to go through the non-identity map/unmap process. */
2442static int iommu_no_mapping(struct pci_dev *pdev)
2443{
2444 int found;
2445
2446 if (!iommu_identity_mapping)
2447 return iommu_dummy(pdev);
2448
2449 found = identity_mapping(pdev);
2450 if (found) {
2451 if (pdev->dma_mask > DMA_BIT_MASK(32))
2452 return 1;
2453 else {
2454 /*
2455 			 * The device is only 32-bit DMA capable; remove it
2456 			 * from si_domain and fall back to non-identity mapping.
2457 */
2458 domain_remove_one_dev_info(si_domain, pdev);
2459 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2460 pci_name(pdev));
2461 return 0;
2462 }
2463 } else {
2464 /*
2465 		 * When a 64-bit DMA capable device is detached from a VM,
2466 		 * it is put back into si_domain for identity mapping.
2467 */
2468 if (pdev->dma_mask > DMA_BIT_MASK(32)) {
2469 int ret;
2470 ret = domain_add_dev_info(si_domain, pdev);
2471 if (!ret) {
2472 printk(KERN_INFO "64bit %s uses identity mapping\n",
2473 pci_name(pdev));
2474 return 1;
2475 }
2476 }
2477 }
2478
2479 return iommu_dummy(pdev);
2480}
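/*
 * The identity-vs-remap policy above, case by case (illustrative):
 *
 *   in si_domain, dma_mask > 32 bits      -> keep identity map (1)
 *   in si_domain, 32-bit dma_mask         -> leave si_domain, use DMA
 *                                            remapping (0)
 *   not in si_domain, dma_mask > 32 bits  -> rejoin si_domain, use
 *                                            identity map (1)
 *   otherwise                             -> treat like iommu_dummy()
 *
 * so a device (e.g. one detached from a VM) migrates between the two
 * schemes based solely on its DMA mask.
 */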
2481
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002482static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2483 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002484{
2485 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002486 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002487 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002488 struct iova *iova;
2489 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002490 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002491 struct intel_iommu *iommu;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002492
2493 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002494
2495 if (iommu_no_mapping(pdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002496 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002497
2498 domain = get_valid_domain_for_dev(pdev);
2499 if (!domain)
2500 return 0;
2501
Weidong Han8c11e792008-12-08 15:29:22 +08002502 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002503 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002504
David Woodhouse875764d2009-06-28 21:20:51 +01002505 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002506 if (!iova)
2507 goto error;
2508
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002509 /*
2510 	 * Check if DMAR supports zero-length reads on write-only
2511 	 * mappings.
2512 */
2513 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002514 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002515 prot |= DMA_PTE_READ;
2516 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2517 prot |= DMA_PTE_WRITE;
2518 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002519 	 * [paddr, paddr + size) might span a partial page; we should map the
2520 	 * whole page.  Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002521 	 * might have two guest_addrs mapping to the same host paddr, but this
2522 	 * is not a big problem
2523 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002524 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2525 paddr >> VTD_PAGE_SHIFT, size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002526 if (ret)
2527 goto error;
2528
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002529 /* it's a non-present to present mapping. Only flush if caching mode */
2530 if (cap_caching_mode(iommu->cap))
David Woodhouse03d6a242009-06-28 15:33:46 +01002531 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002532 else
Weidong Han8c11e792008-12-08 15:29:22 +08002533 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002534
David Woodhouse03d6a242009-06-28 15:33:46 +01002535 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2536 start_paddr += paddr & ~PAGE_MASK;
2537 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002538
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002539error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002540 if (iova)
2541 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002542 	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002543 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002544 return 0;
2545}
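/*
 * Worked example (hypothetical numbers, assuming 4KiB pages): mapping
 * paddr 0x12345678 with size 0x200 needs
 * aligned_nrpages(0x12345678, 0x200) = 1 page.  If intel_alloc_iova()
 * returns an iova with pfn_lo 0xfffff, the new PTE maps IOVA page
 * 0xfffff to physical page 0x12345 and the returned bus address is
 * (0xfffff << PAGE_SHIFT) + 0x678 = 0xfffff678.
 */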
2546
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002547static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2548 unsigned long offset, size_t size,
2549 enum dma_data_direction dir,
2550 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002551{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002552 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2553 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002554}
2555
mark gross5e0d2a62008-03-04 15:22:08 -08002556static void flush_unmaps(void)
2557{
mark gross80b20dd2008-04-18 13:53:58 -07002558 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002559
mark gross5e0d2a62008-03-04 15:22:08 -08002560 timer_on = 0;
2561
2562 /* just flush them all */
2563 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002564 struct intel_iommu *iommu = g_iommus[i];
2565 if (!iommu)
2566 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002567
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002568 if (!deferred_flush[i].next)
2569 continue;
2570
2571 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002572 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002573 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002574 unsigned long mask;
2575 struct iova *iova = deferred_flush[i].iova[j];
2576
2577 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2578 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2579 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2580 iova->pfn_lo << PAGE_SHIFT, mask);
2581 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002582 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002583 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002584 }
2585
mark gross5e0d2a62008-03-04 15:22:08 -08002586 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002587}
2588
2589static void flush_unmaps_timeout(unsigned long data)
2590{
mark gross80b20dd2008-04-18 13:53:58 -07002591 unsigned long flags;
2592
2593 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002594 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002595 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002596}
2597
2598static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2599{
2600 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002601 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002602 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002603
2604 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002605 if (list_size == HIGH_WATER_MARK)
2606 flush_unmaps();
2607
Weidong Han8c11e792008-12-08 15:29:22 +08002608 iommu = domain_get_iommu(dom);
2609 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002610
mark gross80b20dd2008-04-18 13:53:58 -07002611 next = deferred_flush[iommu_id].next;
2612 deferred_flush[iommu_id].domain[next] = dom;
2613 deferred_flush[iommu_id].iova[next] = iova;
2614 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002615
2616 if (!timer_on) {
2617 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2618 timer_on = 1;
2619 }
2620 list_size++;
2621 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2622}
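/*
 * Batching behaviour sketched: each deferred unmap is parked in the
 * per-iommu deferred_flush[] table; the IOTLB is actually flushed
 * either when list_size hits HIGH_WATER_MARK or when the 10ms
 * unmap_timer fires.  A stale translation can thus survive up to
 * roughly 10ms after the unmap returns -- the trade-off behind this
 * default lazy mode versus booting with intel_iommu=strict.
 */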
2623
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002624static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2625 size_t size, enum dma_data_direction dir,
2626 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002627{
2628 struct pci_dev *pdev = to_pci_dev(dev);
2629 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002630 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002631 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002632 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002633
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002634 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002635 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002636
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002637 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002638 BUG_ON(!domain);
2639
Weidong Han8c11e792008-12-08 15:29:22 +08002640 iommu = domain_get_iommu(domain);
2641
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002642 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2643 if (!iova)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002644 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002645
David Woodhoused794dc92009-06-28 00:27:49 +01002646 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2647 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002648
David Woodhoused794dc92009-06-28 00:27:49 +01002649 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2650 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002651
2652 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002653 dma_pte_clear_range(domain, start_pfn, last_pfn);
2654
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002655 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002656 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2657
mark gross5e0d2a62008-03-04 15:22:08 -08002658 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002659 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhoused794dc92009-06-28 00:27:49 +01002660 last_pfn - start_pfn + 1);
mark gross5e0d2a62008-03-04 15:22:08 -08002661 /* free iova */
2662 __free_iova(&domain->iovad, iova);
2663 } else {
2664 add_unmap(domain, iova);
2665 /*
2666 		 * queue up the release of the unmap to save the roughly 1/6th
2667 		 * of the CPU time otherwise used up by the iotlb flush operation...
2668 */
mark gross5e0d2a62008-03-04 15:22:08 -08002669 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002670}
2671
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002672static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2673 int dir)
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002674{
2675 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2676}
2677
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002678static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2679 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002680{
2681 void *vaddr;
2682 int order;
2683
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002684 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002685 order = get_order(size);
2686 flags &= ~(GFP_DMA | GFP_DMA32);
2687
2688 vaddr = (void *)__get_free_pages(flags, order);
2689 if (!vaddr)
2690 return NULL;
2691 memset(vaddr, 0, size);
2692
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002693 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2694 DMA_BIDIRECTIONAL,
2695 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002696 if (*dma_handle)
2697 return vaddr;
2698 free_pages((unsigned long)vaddr, order);
2699 return NULL;
2700}
2701
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002702static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2703 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002704{
2705 int order;
2706
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002707 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002708 order = get_order(size);
2709
2710 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2711 free_pages((unsigned long)vaddr, order);
2712}
2713
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002714static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2715 int nelems, enum dma_data_direction dir,
2716 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002717{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002718 struct pci_dev *pdev = to_pci_dev(hwdev);
2719 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002720 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002721 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002722 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002723
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002724 if (iommu_no_mapping(pdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002725 return;
2726
2727 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002728 BUG_ON(!domain);
2729
2730 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002731
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002732 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002733 if (!iova)
2734 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002735
David Woodhoused794dc92009-06-28 00:27:49 +01002736 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2737 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002738
2739 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002740 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002741
David Woodhoused794dc92009-06-28 00:27:49 +01002742 /* free page tables */
2743 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2744
David Woodhouse03d6a242009-06-28 15:33:46 +01002745 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhoused794dc92009-06-28 00:27:49 +01002746 (last_pfn - start_pfn + 1));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002747
2748 /* free iova */
2749 __free_iova(&domain->iovad, iova);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002750}
2751
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002752static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002753 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002754{
2755 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002756 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002757
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002758 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002759 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002760 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002761 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002762 }
2763 return nelems;
2764}
2765
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002766static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2767 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002768{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002769 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002770 struct pci_dev *pdev = to_pci_dev(hwdev);
2771 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002772 size_t size = 0;
2773 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002775 struct iova *iova = NULL;
2776 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002777 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01002778 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08002779 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002780
2781 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002782 if (iommu_no_mapping(pdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002783 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002784
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002785 domain = get_valid_domain_for_dev(pdev);
2786 if (!domain)
2787 return 0;
2788
Weidong Han8c11e792008-12-08 15:29:22 +08002789 iommu = domain_get_iommu(domain);
2790
David Woodhouseb536d242009-06-28 14:49:31 +01002791 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01002792 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002793
David Woodhouse875764d2009-06-28 21:20:51 +01002794 iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002795 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002796 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002797 return 0;
2798 }
2799
2800 /*
2801 * Check if DMAR supports zero-length reads on write only
2802 * mappings..
2803 */
2804 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002805 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002806 prot |= DMA_PTE_READ;
2807 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2808 prot |= DMA_PTE_WRITE;
2809
David Woodhouseb536d242009-06-28 14:49:31 +01002810 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01002811
2812 ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
2813 if (unlikely(ret)) {
2814 /* clear the page */
2815 dma_pte_clear_range(domain, start_vpfn,
2816 start_vpfn + size - 1);
2817 /* free page tables */
2818 dma_pte_free_pagetable(domain, start_vpfn,
2819 start_vpfn + size - 1);
2820 /* free iova */
2821 __free_iova(&domain->iovad, iova);
2822 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002823 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002824
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002825 /* it's a non-present to present mapping. Only flush if caching mode */
2826 if (cap_caching_mode(iommu->cap))
David Woodhouse03d6a242009-06-28 15:33:46 +01002827 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002828 else
Weidong Han8c11e792008-12-08 15:29:22 +08002829 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002830
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002831 return nelems;
2832}
2833
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

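/*
 * Installed as the global dma_ops when DMA remapping is active;
 * drivers reach these hooks transparently through the DMA API, e.g.:
 *
 *	nelems = dma_map_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nelems, DMA_TO_DEVICE);
 */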
struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

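/*
 * Slab caches for the structures allocated on hot paths:
 * dmar_domain, device_domain_info and iova.
 */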
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
					 sizeof(struct device_domain_info),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

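/*
 * Mark DMAR units as ignored when they control no PCI devices at all,
 * or (if dmar_map_gfx is clear) only graphics devices; such devices
 * are tagged DUMMY_DEVICE_DOMAIN_INFO so later lookups skip them.
 */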
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_disable_protect_mem_regions(iommu);
		iommu_enable_translation(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

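/*
 * Suspend: flush all caches, disable translation and save the
 * fault-event registers; resume restores them after init_iommu_hw()
 * has re-enabled translation.
 */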
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif /* CONFIG_SUSPEND */

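/*
 * Main entry point: parse the DMAR and device-scope tables, bring up
 * the remapping hardware via init_dmars(), and install intel_dma_ops
 * unless pass-through mode was requested.
 */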
int __init intel_iommu_init(void)
{
	int ret = 0;

	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;

	if (!iommu_pass_through) {
		printk(KERN_INFO
		       "Multi-level page-table translation for DMAR.\n");
		dma_ops = &intel_dma_ops;
	} else
		printk(KERN_INFO
		       "DMAR: Pass through translation for DMAR.\n");

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

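/*
 * DMA from a device behind a PCIe-to-PCI(-X) bridge may carry the
 * bridge's source-id, so the context entries of the bridges upstream
 * of the device must be torn down along with the device's own.
 */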
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

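/*
 * Detach one device from a domain: remove its context entry and
 * device_domain_info, and if no other device on the same IOMMU is
 * left in the domain, drop that IOMMU from the domain's bitmap.
 */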
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					    info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

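/*
 * A virtual-machine domain may span several IOMMUs; its usable
 * address width is bounded by the smallest AGAW among them.
 */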
static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

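/*
 * Initialize a domain created through the generic IOMMU API: set up
 * its iova allocator, reserve the special ranges, derive the AGAW
 * from the requested guest width and allocate the top-level pgd.
 */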
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

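/*
 * Callbacks implementing the generic iommu_ops interface, registered
 * via register_iommu() above.  A consumer such as KVM device
 * assignment uses them roughly as:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 */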
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;
	int ret;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	ret = domain_add_dev_info(dmar_domain, pdev);
	if (ret)
		return ret;

	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	return ret;
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

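/* Translate an iova back to a physical address by walking the page tables. */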
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map_range,
	.unmap = intel_iommu_unmap_range,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);