/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
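/* Vendor/device 8086:3a3e is (reportedly) the Intel ICH10 integrated
 * HD Audio ("Azalia") controller, which gets special-cased by the
 * Tylersburg isochronous-DMA quirk (see check_tylersburg_isoch()). */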

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
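
/*
 * Worked example: with gaw == 48 and VTD_PAGE_SHIFT == 12,
 * __DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1. On 64-bit kernels
 * DOMAIN_MAX_PFN(48) is the same value; on 32-bit it is clamped to
 * (unsigned long)-1 by the min_t() above.
 */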

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
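/* With 4KiB pages (PAGE_SHIFT == 12), DMA_32BIT_PFN is 0xfffff: the last
   page frame reachable through a 32-bit DMA address. */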

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
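
/* On x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions above are
   identity operations; the shifts only matter on architectures whose MM
   pages are larger than the 4KiB VT-d page. */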

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ?
		 phys_to_virt(root->val & VTD_PAGE_MASK) : NULL);
}
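
/*
 * The root table is a single 4KiB page: ROOT_ENTRY_NR == 4096/16 == 256
 * entries, one per PCI bus. Each present root entry points to a context
 * table of 256 entries, one per devfn on that bus.
 */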

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
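
/* A page-table page holds 4096/8 == 512 PTEs, so a page-aligned PTE
   pointer is the first entry of its table; the clear/free loops below use
   this to batch cache flushes one table page at a time. */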

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* number of registered intel_iommus; bounds indexing into g_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
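
/* Options are comma-separated; e.g. booting with
   "intel_iommu=on,strict,igfx_off" enables the IOMMU, disables batched
   IOTLB flushing and leaves the graphics device unmapped. */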

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}
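
/* Temporarily setting PF_MEMALLOC lets the GFP_ATOMIC allocations here and
   below dip into the emergency reserves; the caller's original PF_MEMALLOC
   state is restored before returning. */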

static inline void *alloc_pgtable_page(void)
{
	unsigned int flags;
	void *vaddr;

	/* trying to avoid low memory issues */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may differ across iommus, so start from a default agaw and
 * fall back to a smaller supported agaw on iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)alloc_pgtable_page();
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}
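
/*
 * Worked example: an adjusted guest address width of 48 bits gives
 * width_to_agaw(48) == 2 and agaw_to_level(2) == 4 page-table levels:
 * 4 * 9 index bits + 12 page-offset bits == 48.
 */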

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page();
			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) |
				 DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page();
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
}
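
/* For a PSI flush the low bits of the IVA value carry the address-mask
   order: e.g. invalidating 8 naturally aligned pages passes size_order == 3
   alongside the base address. */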

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, domain ID 0 is reserved for non-present to present
	 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || did)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
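
/* The mask above is the rounded-up log2 of the page count: flushing 13
   pages rounds up to 16 and requests mask == 4, falling back to a
   domain-selective flush if the hardware cannot cover that mask. */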

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("Number of Domains supported <%ld>\n", ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
				 GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
		for (; i < cap_ndoms(iommu->cap); ) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);

			i = find_next_bit(iommu->domain_ids,
					  cap_ndoms(iommu->cap), i+1);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	num = find_first_bit(iommu->domain_ids, ndomains);
	for (; num < ndomains; ) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
		num = find_next_bit(iommu->domain_ids,
				    cap_ndoms(iommu->cap), num+1);
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

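/*
 * Round the guest address width up so that a whole number of 9-bit
 * page-table levels sits above the 12-bit page offset, capping at 64 bits.
 * E.g. gaw 40 gives (40-12) % 9 == 1, so agaw becomes 40 + 9 - 1 == 48;
 * gaw 48 is already aligned and stays 48.
 */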
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_count = 1;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

1432static void domain_exit(struct dmar_domain *domain)
1433{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001434 struct dmar_drhd_unit *drhd;
1435 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001436
1437 /* Domain 0 is reserved, so dont process it */
1438 if (!domain)
1439 return;
1440
1441 domain_remove_dev_info(domain);
1442 /* destroy iovas */
1443 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444
1445 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001446 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001447
1448 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001449 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001450
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001451 for_each_active_iommu(iommu, drhd)
1452 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1453 iommu_detach_domain(domain, iommu);
1454
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001455 free_domain_mem(domain);
1456}
1457
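/*
 * Install a context entry for (segment, bus, devfn) pointing at this
 * domain's page tables.  VM and static-identity domains get a per-IOMMU
 * domain id allocated here; in pass-through mode no page-table root is
 * programmed and the address width is set to the largest supported AGAW.
 */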
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001458static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1459 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001460{
1461 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001462 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001463 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001464 struct dma_pte *pgd;
1465 unsigned long num;
1466 unsigned long ndomains;
1467 int id;
1468 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001469 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001470
1471 pr_debug("Set context mapping for %02x:%02x.%d\n",
1472 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001473
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001474 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001475 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1476 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001477
David Woodhouse276dbf992009-04-04 01:45:37 +01001478 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001479 if (!iommu)
1480 return -ENODEV;
1481
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001482 context = device_to_context_entry(iommu, bus, devfn);
1483 if (!context)
1484 return -ENOMEM;
1485 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001486 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001487 spin_unlock_irqrestore(&iommu->lock, flags);
1488 return 0;
1489 }
1490
Weidong Hanea6606b2008-12-08 23:08:15 +08001491 id = domain->id;
1492 pgd = domain->pgd;
1493
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001494 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1495 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001496 int found = 0;
1497
1498 /* find an available domain id for this device in iommu */
1499 ndomains = cap_ndoms(iommu->cap);
1500 num = find_first_bit(iommu->domain_ids, ndomains);
1501 for (; num < ndomains; ) {
1502 if (iommu->domains[num] == domain) {
1503 id = num;
1504 found = 1;
1505 break;
1506 }
1507 num = find_next_bit(iommu->domain_ids,
1508 cap_ndoms(iommu->cap), num+1);
1509 }
1510
1511 if (found == 0) {
1512 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1513 if (num >= ndomains) {
1514 spin_unlock_irqrestore(&iommu->lock, flags);
1515 printk(KERN_ERR "IOMMU: no free domain ids\n");
1516 return -EFAULT;
1517 }
1518
1519 set_bit(num, iommu->domain_ids);
1520 iommu->domains[num] = domain;
1521 id = num;
1522 }
1523
1524 /* Skip top levels of page tables for
1525		 * an iommu whose agaw is smaller than the default.
1526 */
1527 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1528 pgd = phys_to_virt(dma_pte_addr(pgd));
1529 if (!dma_pte_present(pgd)) {
1530 spin_unlock_irqrestore(&iommu->lock, flags);
1531 return -ENOMEM;
1532 }
1533 }
1534 }
1535
1536 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001537
Yu Zhao93a23a72009-05-18 13:51:37 +08001538 if (translation != CONTEXT_TT_PASS_THROUGH) {
1539 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1540 translation = info ? CONTEXT_TT_DEV_IOTLB :
1541 CONTEXT_TT_MULTI_LEVEL;
1542 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001543 /*
1544 * In pass through mode, AW must be programmed to indicate the largest
1545 * AGAW value supported by hardware. And ASR is ignored by hardware.
1546 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001547 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001548 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001549 else {
1550 context_set_address_root(context, virt_to_phys(pgd));
1551 context_set_address_width(context, iommu->agaw);
1552 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001553
1554 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001555 context_set_fault_enable(context);
1556 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001557 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001558
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001559 /*
1560	 * It's a non-present to present mapping. If hardware doesn't cache
1561	 * non-present entries we only need to flush the write-buffer. If it
1562	 * _does_ cache non-present entries, then it does so in the special
1563 * domain #0, which we have to flush:
1564 */
1565 if (cap_caching_mode(iommu->cap)) {
1566 iommu->flush.flush_context(iommu, 0,
1567 (((u16)bus) << 8) | devfn,
1568 DMA_CCMD_MASK_NOBIT,
1569 DMA_CCMD_DEVICE_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001570 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001571 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001572 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001573 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001574 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001575 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001576
1577 spin_lock_irqsave(&domain->iommu_lock, flags);
1578 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1579 domain->iommu_count++;
Sheng Yang58c610b2009-03-18 15:33:05 +08001580 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001581 }
1582 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001583 return 0;
1584}
1585
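/*
 * Map the device itself and then every bridge on the path up to the
 * root, since DMA from a device behind a PCIe-to-PCI bridge carries
 * the bridge's source-id rather than the device's own.
 */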
1586static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001587domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1588 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001589{
1590 int ret;
1591 struct pci_dev *tmp, *parent;
1592
David Woodhouse276dbf992009-04-04 01:45:37 +01001593 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001594 pdev->bus->number, pdev->devfn,
1595 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001596 if (ret)
1597 return ret;
1598
1599 /* dependent device mapping */
1600 tmp = pci_find_upstream_pcie_bridge(pdev);
1601 if (!tmp)
1602 return 0;
1603 /* Secondary interface's bus number and devfn 0 */
1604 parent = pdev->bus->self;
1605 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001606 ret = domain_context_mapping_one(domain,
1607 pci_domain_nr(parent->bus),
1608 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001609 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001610 if (ret)
1611 return ret;
1612 parent = parent->bus->self;
1613 }
1614 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1615 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001616 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001617 tmp->subordinate->number, 0,
1618 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001619 else /* this is a legacy PCI bridge */
1620 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001621 pci_domain_nr(tmp->bus),
1622 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001623 tmp->devfn,
1624 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001625}
1626
Weidong Han5331fe62008-12-08 23:00:00 +08001627static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001628{
1629 int ret;
1630 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001631 struct intel_iommu *iommu;
1632
David Woodhouse276dbf992009-04-04 01:45:37 +01001633 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1634 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001635 if (!iommu)
1636 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001637
David Woodhouse276dbf992009-04-04 01:45:37 +01001638 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001639 if (!ret)
1640 return ret;
1641 /* dependent device mapping */
1642 tmp = pci_find_upstream_pcie_bridge(pdev);
1643 if (!tmp)
1644 return ret;
1645 /* Secondary interface's bus number and devfn 0 */
1646 parent = pdev->bus->self;
1647 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001648 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001649 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001650 if (!ret)
1651 return ret;
1652 parent = parent->bus->self;
1653 }
1654 if (tmp->is_pcie)
David Woodhouse276dbf992009-04-04 01:45:37 +01001655 return device_context_mapped(iommu, tmp->subordinate->number,
1656 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001657 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001658 return device_context_mapped(iommu, tmp->bus->number,
1659 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001660}
1661
Fenghua Yuf5329592009-08-04 15:09:37 -07001662/* Returns a number of VTD pages, but aligned to MM page size */
1663static inline unsigned long aligned_nrpages(unsigned long host_addr,
1664 size_t size)
1665{
1666 host_addr &= ~PAGE_MASK;
1667 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1668}
1669
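/*
 * Core mapping loop: write nr_pages PTEs starting at iov_pfn, taking
 * the physical addresses either from a scatterlist (sg != NULL) or
 * from a contiguous run starting at phys_pfn.  The cmpxchg below
 * catches PTEs that are unexpectedly already set, and the CPU cache
 * is flushed one page-table page at a time.
 */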
David Woodhouse9051aa02009-06-29 12:30:54 +01001670static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1671 struct scatterlist *sg, unsigned long phys_pfn,
1672 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001673{
1674 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001675 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001676 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001677 unsigned long sg_res;
David Woodhousee1605492009-06-29 11:17:38 +01001678
1679 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1680
1681 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1682 return -EINVAL;
1683
1684 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1685
David Woodhouse9051aa02009-06-29 12:30:54 +01001686 if (sg)
1687 sg_res = 0;
1688 else {
1689 sg_res = nr_pages + 1;
1690 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1691 }
1692
David Woodhousee1605492009-06-29 11:17:38 +01001693 while (nr_pages--) {
David Woodhousec85994e2009-07-01 19:21:24 +01001694 uint64_t tmp;
1695
David Woodhousee1605492009-06-29 11:17:38 +01001696 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001697 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001698 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1699 sg->dma_length = sg->length;
1700 pteval = page_to_phys(sg_page(sg)) | prot;
1701 }
1702 if (!pte) {
1703 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1704 if (!pte)
1705 return -ENOMEM;
1706 }
1707		/* We don't need a lock here; nobody else
1708		 * touches this iova range
1709 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001710 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001711 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001712 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001713 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1714 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001715 if (dumps) {
1716 dumps--;
1717 debug_dma_dump_mappings(NULL);
1718 }
1719 WARN_ON(1);
1720 }
David Woodhousee1605492009-06-29 11:17:38 +01001721 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +01001722 if (!nr_pages || first_pte_in_page(pte)) {
David Woodhousee1605492009-06-29 11:17:38 +01001723 domain_flush_cache(domain, first_pte,
1724 (void *)pte - (void *)first_pte);
1725 pte = NULL;
1726 }
1727 iov_pfn++;
1728 pteval += VTD_PAGE_SIZE;
1729 sg_res--;
1730 if (!sg_res)
1731 sg = sg_next(sg);
1732 }
1733 return 0;
1734}
1735
David Woodhouse9051aa02009-06-29 12:30:54 +01001736static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1737 struct scatterlist *sg, unsigned long nr_pages,
1738 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001739{
David Woodhouse9051aa02009-06-29 12:30:54 +01001740 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1741}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001742
David Woodhouse9051aa02009-06-29 12:30:54 +01001743static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1744 unsigned long phys_pfn, unsigned long nr_pages,
1745 int prot)
1746{
1747 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001748}
1749
Weidong Hanc7151a82008-12-08 22:51:37 +08001750static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001751{
Weidong Hanc7151a82008-12-08 22:51:37 +08001752 if (!iommu)
1753 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001754
1755 clear_context_table(iommu, bus, devfn);
1756 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001757 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001758 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001759}
1760
1761static void domain_remove_dev_info(struct dmar_domain *domain)
1762{
1763 struct device_domain_info *info;
1764 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001765 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001766
1767 spin_lock_irqsave(&device_domain_lock, flags);
1768 while (!list_empty(&domain->devices)) {
1769 info = list_entry(domain->devices.next,
1770 struct device_domain_info, link);
1771 list_del(&info->link);
1772 list_del(&info->global);
1773 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001774 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001775 spin_unlock_irqrestore(&device_domain_lock, flags);
1776
Yu Zhao93a23a72009-05-18 13:51:37 +08001777 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001778 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001779 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001780 free_devinfo_mem(info);
1781
1782 spin_lock_irqsave(&device_domain_lock, flags);
1783 }
1784 spin_unlock_irqrestore(&device_domain_lock, flags);
1785}
1786
1787/*
1788 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001789 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001790 */
Kay, Allen M38717942008-09-09 18:37:29 +03001791static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001792find_domain(struct pci_dev *pdev)
1793{
1794 struct device_domain_info *info;
1795
1796	/* No lock here; we assume no domain exits in the normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001797 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001798 if (info)
1799 return info->domain;
1800 return NULL;
1801}
1802
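/*
 * Devices behind the same PCIe-to-PCI bridge must share one domain:
 * requests from conventional PCI devices all carry the bridge's
 * source-id, so the shared domain is keyed on the bridge's secondary
 * bus and devfn 0.
 */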
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001803/* domain is initialized */
1804static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1805{
1806 struct dmar_domain *domain, *found = NULL;
1807 struct intel_iommu *iommu;
1808 struct dmar_drhd_unit *drhd;
1809 struct device_domain_info *info, *tmp;
1810 struct pci_dev *dev_tmp;
1811 unsigned long flags;
1812 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001813 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001814 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001815
1816 domain = find_domain(pdev);
1817 if (domain)
1818 return domain;
1819
David Woodhouse276dbf992009-04-04 01:45:37 +01001820 segment = pci_domain_nr(pdev->bus);
1821
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001822 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1823 if (dev_tmp) {
1824 if (dev_tmp->is_pcie) {
1825 bus = dev_tmp->subordinate->number;
1826 devfn = 0;
1827 } else {
1828 bus = dev_tmp->bus->number;
1829 devfn = dev_tmp->devfn;
1830 }
1831 spin_lock_irqsave(&device_domain_lock, flags);
1832 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001833 if (info->segment == segment &&
1834 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001835 found = info->domain;
1836 break;
1837 }
1838 }
1839 spin_unlock_irqrestore(&device_domain_lock, flags);
1840		/* pcie-pci bridge already has a domain, use it */
1841 if (found) {
1842 domain = found;
1843 goto found_domain;
1844 }
1845 }
1846
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001847 domain = alloc_domain();
1848 if (!domain)
1849 goto error;
1850
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001851 /* Allocate new domain for the device */
1852 drhd = dmar_find_matched_drhd_unit(pdev);
1853 if (!drhd) {
1854 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1855 pci_name(pdev));
1856 return NULL;
1857 }
1858 iommu = drhd->iommu;
1859
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001860 ret = iommu_attach_domain(domain, iommu);
1861 if (ret) {
1862 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001863 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001864 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001865
1866 if (domain_init(domain, gaw)) {
1867 domain_exit(domain);
1868 goto error;
1869 }
1870
1871 /* register pcie-to-pci device */
1872 if (dev_tmp) {
1873 info = alloc_devinfo_mem();
1874 if (!info) {
1875 domain_exit(domain);
1876 goto error;
1877 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001878 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001879 info->bus = bus;
1880 info->devfn = devfn;
1881 info->dev = NULL;
1882 info->domain = domain;
1883 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001884 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001885
1886		/* pcie-to-pci bridge already has a domain, use it */
1887 found = NULL;
1888 spin_lock_irqsave(&device_domain_lock, flags);
1889 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001890 if (tmp->segment == segment &&
1891 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001892 found = tmp->domain;
1893 break;
1894 }
1895 }
1896 if (found) {
1897 free_devinfo_mem(info);
1898 domain_exit(domain);
1899 domain = found;
1900 } else {
1901 list_add(&info->link, &domain->devices);
1902 list_add(&info->global, &device_domain_list);
1903 }
1904 spin_unlock_irqrestore(&device_domain_lock, flags);
1905 }
1906
1907found_domain:
1908 info = alloc_devinfo_mem();
1909 if (!info)
1910 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001911 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001912 info->bus = pdev->bus->number;
1913 info->devfn = pdev->devfn;
1914 info->dev = pdev;
1915 info->domain = domain;
1916 spin_lock_irqsave(&device_domain_lock, flags);
1917	/* somebody else raced us and set it already */
1918 found = find_domain(pdev);
1919 if (found != NULL) {
1920 spin_unlock_irqrestore(&device_domain_lock, flags);
1921 if (found != domain) {
1922 domain_exit(domain);
1923 domain = found;
1924 }
1925 free_devinfo_mem(info);
1926 return domain;
1927 }
1928 list_add(&info->link, &domain->devices);
1929 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001930 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931 spin_unlock_irqrestore(&device_domain_lock, flags);
1932 return domain;
1933error:
1934 /* recheck it here, maybe others set it */
1935 return find_domain(pdev);
1936}
1937
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001938static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07001939#define IDENTMAP_ALL 1
1940#define IDENTMAP_GFX 2
1941#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001942
David Woodhouseb2132032009-06-26 18:50:28 +01001943static int iommu_domain_identity_map(struct dmar_domain *domain,
1944 unsigned long long start,
1945 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001946{
David Woodhousec5395d52009-06-28 16:35:56 +01001947 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1948 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001949
David Woodhousec5395d52009-06-28 16:35:56 +01001950 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1951 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001952 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01001953 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001954 }
1955
David Woodhousec5395d52009-06-28 16:35:56 +01001956 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1957 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001958 /*
1959	 * The RMRR range might overlap the physical memory range,
1960	 * so clear it first
1961 */
David Woodhousec5395d52009-06-28 16:35:56 +01001962 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001963
David Woodhousec5395d52009-06-28 16:35:56 +01001964 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1965 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01001966 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01001967}
1968
1969static int iommu_prepare_identity_map(struct pci_dev *pdev,
1970 unsigned long long start,
1971 unsigned long long end)
1972{
1973 struct dmar_domain *domain;
1974 int ret;
1975
David Woodhousec7ab48d2009-06-26 19:10:36 +01001976 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01001977 if (!domain)
1978 return -ENOMEM;
1979
David Woodhouse19943b02009-08-04 16:19:20 +01001980 /* For _hardware_ passthrough, don't bother. But for software
1981 passthrough, we do it anyway -- it may indicate a memory
1982 range which is reserved in E820, so which didn't get set
1983 up to start with in si_domain */
1984 if (domain == si_domain && hw_pass_through) {
1985		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1986 pci_name(pdev), start, end);
1987 return 0;
1988 }
1989
1990 printk(KERN_INFO
1991 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1992 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01001993
1994 if (end >> agaw_to_width(domain->agaw)) {
1995 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1996 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1997 agaw_to_width(domain->agaw),
1998 dmi_get_system_info(DMI_BIOS_VENDOR),
1999 dmi_get_system_info(DMI_BIOS_VERSION),
2000 dmi_get_system_info(DMI_PRODUCT_VERSION));
2001 ret = -EIO;
2002 goto error;
2003 }
David Woodhouse19943b02009-08-04 16:19:20 +01002004
David Woodhouseb2132032009-06-26 18:50:28 +01002005 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002006 if (ret)
2007 goto error;
2008
2009 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002010 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002011 if (ret)
2012 goto error;
2013
2014 return 0;
2015
2016 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002017 domain_exit(domain);
2018 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002019}
2020
2021static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2022 struct pci_dev *pdev)
2023{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002024 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002025 return 0;
2026 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2027 rmrr->end_address + 1);
2028}
2029
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002030#ifdef CONFIG_DMAR_FLOPPY_WA
2031static inline void iommu_prepare_isa(void)
2032{
2033 struct pci_dev *pdev;
2034 int ret;
2035
2036 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2037 if (!pdev)
2038 return;
2039
David Woodhousec7ab48d2009-06-26 19:10:36 +01002040 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002041 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2042
2043 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002044 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2045 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002046
2047}
2048#else
2049static inline void iommu_prepare_isa(void)
2050{
2051 return;
2052}
2053#endif /* !CONFIG_DMAR_FLOPPY_WA */
2054
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002055static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002056
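/*
 * Helper for work_with_active_regions(): identity-map one range of
 * usable physical memory into the static identity (si) domain.
 */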
2057static int __init si_domain_work_fn(unsigned long start_pfn,
2058 unsigned long end_pfn, void *datax)
2059{
2060 int *ret = datax;
2061
2062 *ret = iommu_domain_identity_map(si_domain,
2063 (uint64_t)start_pfn << PAGE_SHIFT,
2064 (uint64_t)end_pfn << PAGE_SHIFT);
2065 return *ret;
2066
2067}
2068
Matt Kraai071e1372009-08-23 22:30:22 -07002069static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002070{
2071 struct dmar_drhd_unit *drhd;
2072 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002073 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002074
2075 si_domain = alloc_domain();
2076 if (!si_domain)
2077 return -EFAULT;
2078
David Woodhousec7ab48d2009-06-26 19:10:36 +01002079 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002080
2081 for_each_active_iommu(iommu, drhd) {
2082 ret = iommu_attach_domain(si_domain, iommu);
2083 if (ret) {
2084 domain_exit(si_domain);
2085 return -EFAULT;
2086 }
2087 }
2088
2089 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2090 domain_exit(si_domain);
2091 return -EFAULT;
2092 }
2093
2094 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2095
David Woodhouse19943b02009-08-04 16:19:20 +01002096 if (hw)
2097 return 0;
2098
David Woodhousec7ab48d2009-06-26 19:10:36 +01002099 for_each_online_node(nid) {
2100 work_with_active_regions(nid, si_domain_work_fn, &ret);
2101 if (ret)
2102 return ret;
2103 }
2104
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002105 return 0;
2106}
2107
2108static void domain_remove_one_dev_info(struct dmar_domain *domain,
2109 struct pci_dev *pdev);
2110static int identity_mapping(struct pci_dev *pdev)
2111{
2112 struct device_domain_info *info;
2113
2114 if (likely(!iommu_identity_mapping))
2115 return 0;
2116
2117
2118 list_for_each_entry(info, &si_domain->devices, link)
2119 if (info->dev == pdev)
2120 return 1;
2121 return 0;
2122}
2123
2124static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002125 struct pci_dev *pdev,
2126 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002127{
2128 struct device_domain_info *info;
2129 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002130 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002131
2132 info = alloc_devinfo_mem();
2133 if (!info)
2134 return -ENOMEM;
2135
David Woodhouse5fe60f42009-08-09 10:53:41 +01002136 ret = domain_context_mapping(domain, pdev, translation);
2137 if (ret) {
2138 free_devinfo_mem(info);
2139 return ret;
2140 }
2141
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002142 info->segment = pci_domain_nr(pdev->bus);
2143 info->bus = pdev->bus->number;
2144 info->devfn = pdev->devfn;
2145 info->dev = pdev;
2146 info->domain = domain;
2147
2148 spin_lock_irqsave(&device_domain_lock, flags);
2149 list_add(&info->link, &domain->devices);
2150 list_add(&info->global, &device_domain_list);
2151 pdev->dev.archdata.iommu = info;
2152 spin_unlock_irqrestore(&device_domain_lock, flags);
2153
2154 return 0;
2155}
2156
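/*
 * Decide whether a device should start out with a 1:1 mapping in the
 * static identity domain.
 */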
David Woodhouse6941af22009-07-04 18:24:27 +01002157static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2158{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002159 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2160 return 1;
2161
2162 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2163 return 1;
2164
2165 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2166 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002167
David Woodhouse3dfc8132009-07-04 19:11:08 +01002168 /*
2169 * We want to start off with all devices in the 1:1 domain, and
2170 * take them out later if we find they can't access all of memory.
2171 *
2172 * However, we can't do this for PCI devices behind bridges,
2173 * because all PCI devices behind the same bridge will end up
2174 * with the same source-id on their transactions.
2175 *
2176 * Practically speaking, we can't change things around for these
2177 * devices at run-time, because we can't be sure there'll be no
2178 * DMA transactions in flight for any of their siblings.
2179 *
2180 * So PCI devices (unless they're on the root bus) as well as
2181 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2182 * the 1:1 domain, just in _case_ one of their siblings turns out
2183 * not to be able to map all of memory.
2184 */
2185 if (!pdev->is_pcie) {
2186 if (!pci_is_root_bus(pdev->bus))
2187 return 0;
2188 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2189 return 0;
2190 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2191 return 0;
2192
2193 /*
2194 * At boot time, we don't yet know if devices will be 64-bit capable.
2195 * Assume that they will -- if they turn out not to be, then we can
2196 * take them out of the 1:1 domain later.
2197 */
David Woodhouse6941af22009-07-04 18:24:27 +01002198 if (!startup)
2199 return pdev->dma_mask > DMA_BIT_MASK(32);
2200
2201 return 1;
2202}
2203
Matt Kraai071e1372009-08-23 22:30:22 -07002204static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002205{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002206 struct pci_dev *pdev = NULL;
2207 int ret;
2208
David Woodhouse19943b02009-08-04 16:19:20 +01002209 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002210 if (ret)
2211 return -EFAULT;
2212
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002213 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002214 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse19943b02009-08-04 16:19:20 +01002215 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2216 hw ? "hardware" : "software", pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002217
David Woodhouse5fe60f42009-08-09 10:53:41 +01002218 ret = domain_add_dev_info(si_domain, pdev,
David Woodhouse19943b02009-08-04 16:19:20 +01002219 hw ? CONTEXT_TT_PASS_THROUGH :
David Woodhouse62edf5d2009-07-04 10:59:46 +01002220 CONTEXT_TT_MULTI_LEVEL);
2221 if (ret)
2222 return ret;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002223 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002224 }
2225
2226 return 0;
2227}
2228
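/*
 * One-time DMAR initialization: allocate per-IOMMU state and root
 * entries, set up identity and RMRR mappings, then enable fault
 * reporting and translation on each unit.
 */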
2229int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002230{
2231 struct dmar_drhd_unit *drhd;
2232 struct dmar_rmrr_unit *rmrr;
2233 struct pci_dev *pdev;
2234 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002235 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002236
2237 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002238 * for each drhd
2239 * allocate root
2240 * initialize and program root entry to not present
2241 * endfor
2242 */
2243 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002244 g_num_of_iommus++;
2245 /*
2246 * lock not needed as this is only incremented in the single
2247 * threaded kernel __init code path all other access are read
2248 * only
2249 */
2250 }
2251
Weidong Hand9630fe2008-12-08 11:06:32 +08002252 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2253 GFP_KERNEL);
2254 if (!g_iommus) {
2255 printk(KERN_ERR "Allocating global iommu array failed\n");
2256 ret = -ENOMEM;
2257 goto error;
2258 }
2259
mark gross80b20dd2008-04-18 13:53:58 -07002260 deferred_flush = kzalloc(g_num_of_iommus *
2261 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2262 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002263 ret = -ENOMEM;
2264 goto error;
2265 }
2266
mark gross5e0d2a62008-03-04 15:22:08 -08002267 for_each_drhd_unit(drhd) {
2268 if (drhd->ignored)
2269 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002270
2271 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002272 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002273
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002274 ret = iommu_init_domains(iommu);
2275 if (ret)
2276 goto error;
2277
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002278 /*
2279 * TBD:
2280 * we could share the same root & context tables
2281		 * among all IOMMUs. Need to split it later.
2282 */
2283 ret = iommu_alloc_root_entry(iommu);
2284 if (ret) {
2285 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2286 goto error;
2287 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002288 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002289 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002290 }
2291
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002292 /*
2293	 * Start from a sane iommu hardware state.
2294 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002295 for_each_drhd_unit(drhd) {
2296 if (drhd->ignored)
2297 continue;
2298
2299 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002300
2301 /*
2302 * If the queued invalidation is already initialized by us
2303 * (for example, while enabling interrupt-remapping) then
2304 * we got the things already rolling from a sane state.
2305 */
2306 if (iommu->qi)
2307 continue;
2308
2309 /*
2310 * Clear any previous faults.
2311 */
2312 dmar_fault(-1, iommu);
2313 /*
2314 * Disable queued invalidation if supported and already enabled
2315 * before OS handover.
2316 */
2317 dmar_disable_qi(iommu);
2318 }
2319
2320 for_each_drhd_unit(drhd) {
2321 if (drhd->ignored)
2322 continue;
2323
2324 iommu = drhd->iommu;
2325
Youquan Songa77b67d2008-10-16 16:31:56 -07002326 if (dmar_enable_qi(iommu)) {
2327 /*
2328 * Queued Invalidate not enabled, use Register Based
2329 * Invalidate
2330 */
2331 iommu->flush.flush_context = __iommu_flush_context;
2332 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2333 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002334 "invalidation\n",
2335 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002336 } else {
2337 iommu->flush.flush_context = qi_flush_context;
2338 iommu->flush.flush_iotlb = qi_flush_iotlb;
2339 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002340 "invalidation\n",
2341 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002342 }
2343 }
2344
David Woodhouse19943b02009-08-04 16:19:20 +01002345 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002346 iommu_identity_mapping |= IDENTMAP_ALL;
2347
David Woodhouse19943b02009-08-04 16:19:20 +01002348#ifdef CONFIG_DMAR_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002349 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002350#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002351
2352 check_tylersburg_isoch();
2353
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002354 /*
2355	 * If pass through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002356	 * identity mappings for rmrr, gfx and isa, and fall back to the static
2357	 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002358 */
David Woodhouse19943b02009-08-04 16:19:20 +01002359 if (iommu_identity_mapping) {
2360 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2361 if (ret) {
2362 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2363 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002364 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002365 }
David Woodhouse19943b02009-08-04 16:19:20 +01002366 /*
2367 * For each rmrr
2368 * for each dev attached to rmrr
2369 * do
2370 * locate drhd for dev, alloc domain for dev
2371 * allocate free domain
2372 * allocate page table entries for rmrr
2373 * if context not allocated for bus
2374 * allocate and init context
2375 * set present in root table for this bus
2376 * init context with domain, translation etc
2377 * endfor
2378 * endfor
2379 */
2380 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2381 for_each_rmrr_units(rmrr) {
2382 for (i = 0; i < rmrr->devices_cnt; i++) {
2383 pdev = rmrr->devices[i];
2384 /*
2385			 * some BIOSes list non-existent
2386			 * devices in the DMAR table.
2387 */
2388 if (!pdev)
2389 continue;
2390 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2391 if (ret)
2392 printk(KERN_ERR
2393 "IOMMU: mapping reserved region failed\n");
2394 }
2395 }
2396
2397 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002398
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399 /*
2400 * for each drhd
2401 * enable fault log
2402 * global invalidate context cache
2403 * global invalidate iotlb
2404 * enable translation
2405 */
2406 for_each_drhd_unit(drhd) {
2407 if (drhd->ignored)
2408 continue;
2409 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002410
2411 iommu_flush_write_buffer(iommu);
2412
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002413 ret = dmar_set_interrupt(iommu);
2414 if (ret)
2415 goto error;
2416
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002417 iommu_set_root_entry(iommu);
2418
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002419 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002420 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002421
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002422 ret = iommu_enable_translation(iommu);
2423 if (ret)
2424 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002425
2426 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002427 }
2428
2429 return 0;
2430error:
2431 for_each_drhd_unit(drhd) {
2432 if (drhd->ignored)
2433 continue;
2434 iommu = drhd->iommu;
2435 free_iommu(iommu);
2436 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002437 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002438 return ret;
2439}
2440
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002441/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002442static struct iova *intel_alloc_iova(struct device *dev,
2443 struct dmar_domain *domain,
2444 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002445{
2446 struct pci_dev *pdev = to_pci_dev(dev);
2447 struct iova *iova = NULL;
2448
David Woodhouse875764d2009-06-28 21:20:51 +01002449 /* Restrict dma_mask to the width that the iommu can handle */
2450 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2451
2452 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002453 /*
2454 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002455 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002456		 * from the higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002457 */
David Woodhouse875764d2009-06-28 21:20:51 +01002458 iova = alloc_iova(&domain->iovad, nrpages,
2459 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2460 if (iova)
2461 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002462 }
David Woodhouse875764d2009-06-28 21:20:51 +01002463 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2464 if (unlikely(!iova)) {
2465		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2466 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002467 return NULL;
2468 }
2469
2470 return iova;
2471}
2472
David Woodhouse147202a2009-07-07 19:43:20 +01002473static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002474{
2475 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002476 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002477
2478 domain = get_domain_for_dev(pdev,
2479 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2480 if (!domain) {
2481 printk(KERN_ERR
2482		       "Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002483 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002484 }
2485
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002486 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002487 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002488 ret = domain_context_mapping(domain, pdev,
2489 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002490 if (ret) {
2491 printk(KERN_ERR
2492			       "Domain context map for %s failed\n",
2493 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002494 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002495 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002496 }
2497
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002498 return domain;
2499}
2500
David Woodhouse147202a2009-07-07 19:43:20 +01002501static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2502{
2503 struct device_domain_info *info;
2504
2505	/* No lock here; we assume no domain exits in the normal case */
2506 info = dev->dev.archdata.iommu;
2507 if (likely(info))
2508 return info->domain;
2509
2510 return __get_valid_domain_for_dev(dev);
2511}
2512
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002513static int iommu_dummy(struct pci_dev *pdev)
2514{
2515 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2516}
2517
2518/* Check if the pdev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002519static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002520{
David Woodhouse73676832009-07-04 14:08:36 +01002521 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002522 int found;
2523
David Woodhouse73676832009-07-04 14:08:36 +01002524 if (unlikely(dev->bus != &pci_bus_type))
2525 return 1;
2526
2527 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002528 if (iommu_dummy(pdev))
2529 return 1;
2530
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002531 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002532 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002533
2534 found = identity_mapping(pdev);
2535 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002536 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002537 return 1;
2538 else {
2539 /*
2540			 * A 32-bit DMA device is removed from si_domain and falls
2541			 * back to non-identity mapping.
2542 */
2543 domain_remove_one_dev_info(si_domain, pdev);
2544 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2545 pci_name(pdev));
2546 return 0;
2547 }
2548 } else {
2549 /*
2550		 * If a 64-bit DMA device is detached from a VM, the device
2551		 * is put back into si_domain for identity mapping.
2552 */
David Woodhouse6941af22009-07-04 18:24:27 +01002553 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002554 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002555 ret = domain_add_dev_info(si_domain, pdev,
2556 hw_pass_through ?
2557 CONTEXT_TT_PASS_THROUGH :
2558 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002559 if (!ret) {
2560 printk(KERN_INFO "64bit %s uses identity mapping\n",
2561 pci_name(pdev));
2562 return 1;
2563 }
2564 }
2565 }
2566
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002567 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002568}
2569
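/*
 * Map a single buffer for DMA: find the device's domain, allocate an
 * IOVA below the device's dma_mask, write the PTEs, then flush the
 * IOTLB (in caching mode) or just the write buffer.
 */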
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002570static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2571 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002572{
2573 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002574 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002575 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002576 struct iova *iova;
2577 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002578 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002579 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002580 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002581
2582 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002583
David Woodhouse73676832009-07-04 14:08:36 +01002584 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002585 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002586
2587 domain = get_valid_domain_for_dev(pdev);
2588 if (!domain)
2589 return 0;
2590
Weidong Han8c11e792008-12-08 15:29:22 +08002591 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002592 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002593
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002594 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2595 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002596 if (!iova)
2597 goto error;
2598
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002599 /*
2600 * Check if DMAR supports zero-length reads on write only
2601 * mappings..
2602 */
2603 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002604 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002605 prot |= DMA_PTE_READ;
2606 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2607 prot |= DMA_PTE_WRITE;
2608 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002609 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002610 * page. Note: if two part of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002611 * might have two guest_addr mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002612 * is not a big problem
2613 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002614 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002615 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002616 if (ret)
2617 goto error;
2618
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002619 /* it's a non-present to present mapping. Only flush if caching mode */
2620 if (cap_caching_mode(iommu->cap))
David Woodhouse03d6a242009-06-28 15:33:46 +01002621 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002622 else
Weidong Han8c11e792008-12-08 15:29:22 +08002623 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002624
David Woodhouse03d6a242009-06-28 15:33:46 +01002625 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2626 start_paddr += paddr & ~PAGE_MASK;
2627 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002628
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002629error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002630 if (iova)
2631 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002632 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002633 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002634 return 0;
2635}
2636
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002637static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2638 unsigned long offset, size_t size,
2639 enum dma_data_direction dir,
2640 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002641{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002642 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2643 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002644}
2645
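/*
 * Deferred unmapping: instead of flushing the IOTLB on every unmap,
 * freed IOVAs are queued per IOMMU (see add_unmap() below) and
 * released here in one batch after a global IOTLB flush.
 */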
mark gross5e0d2a62008-03-04 15:22:08 -08002646static void flush_unmaps(void)
2647{
mark gross80b20dd2008-04-18 13:53:58 -07002648 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002649
mark gross5e0d2a62008-03-04 15:22:08 -08002650 timer_on = 0;
2651
2652 /* just flush them all */
2653 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002654 struct intel_iommu *iommu = g_iommus[i];
2655 if (!iommu)
2656 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002657
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002658 if (!deferred_flush[i].next)
2659 continue;
2660
2661 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002662 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002663 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002664 unsigned long mask;
2665 struct iova *iova = deferred_flush[i].iova[j];
2666
Benjamin LaHaise64de5af2009-09-16 21:05:55 -04002667 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
Yu Zhao93a23a72009-05-18 13:51:37 +08002668 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
Benjamin LaHaise64de5af2009-09-16 21:05:55 -04002669 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
Yu Zhao93a23a72009-05-18 13:51:37 +08002670 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002671 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002672 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002673 }
2674
mark gross5e0d2a62008-03-04 15:22:08 -08002675 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002676}
2677
2678static void flush_unmaps_timeout(unsigned long data)
2679{
mark gross80b20dd2008-04-18 13:53:58 -07002680 unsigned long flags;
2681
2682 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002683 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002684 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002685}
2686
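/*
 * Queue an IOVA for deferred freeing; the queue is drained by the
 * timer after 10ms, or immediately once HIGH_WATER_MARK entries
 * accumulate.
 */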
2687static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2688{
2689 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002690 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002691 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002692
2693 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002694 if (list_size == HIGH_WATER_MARK)
2695 flush_unmaps();
2696
Weidong Han8c11e792008-12-08 15:29:22 +08002697 iommu = domain_get_iommu(dom);
2698 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002699
mark gross80b20dd2008-04-18 13:53:58 -07002700 next = deferred_flush[iommu_id].next;
2701 deferred_flush[iommu_id].domain[next] = dom;
2702 deferred_flush[iommu_id].iova[next] = iova;
2703 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002704
2705 if (!timer_on) {
2706 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2707 timer_on = 1;
2708 }
2709 list_size++;
2710 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2711}
2712
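/*
 * Unmap a buffer: clear the PTEs and free the page tables, then
 * either flush the IOTLB immediately (intel_iommu_strict) or queue
 * the IOVA for a batched flush.
 */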
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002713static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2714 size_t size, enum dma_data_direction dir,
2715 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002716{
2717 struct pci_dev *pdev = to_pci_dev(dev);
2718 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002719 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002720 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002721 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002722
David Woodhouse73676832009-07-04 14:08:36 +01002723 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002724 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002725
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002726 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002727 BUG_ON(!domain);
2728
Weidong Han8c11e792008-12-08 15:29:22 +08002729 iommu = domain_get_iommu(domain);
2730
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002731 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002732 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2733 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002734 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002735
David Woodhoused794dc92009-06-28 00:27:49 +01002736 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2737 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002738
David Woodhoused794dc92009-06-28 00:27:49 +01002739 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2740 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002741
2742 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002743 dma_pte_clear_range(domain, start_pfn, last_pfn);
2744
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002745 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002746 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2747
mark gross5e0d2a62008-03-04 15:22:08 -08002748 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002749 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
David Woodhoused794dc92009-06-28 00:27:49 +01002750 last_pfn - start_pfn + 1);
mark gross5e0d2a62008-03-04 15:22:08 -08002751 /* free iova */
2752 __free_iova(&domain->iovad, iova);
2753 } else {
2754 add_unmap(domain, iova);
2755 /*
2756			 * queue up the release of the unmap to save the 1/6th of
2757			 * the cpu time used up by the iotlb flush operation...
2758 */
mark gross5e0d2a62008-03-04 15:22:08 -08002759 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002760}
2761
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, order);
}

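/*
 * intel_unmap_sg - dma_map_ops ->unmap_sg callback.
 *
 * A scatterlist is mapped into one contiguous IOVA range, so the
 * whole range can be looked up from the first entry's dma_address
 * and torn down exactly as in intel_unmap_page().
 */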
static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_pfn, last_pfn;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (iommu_no_mapping(hwdev))
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
		      (unsigned long long)sglist[0].dma_address))
		return;

	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

	/* clear the whole page */
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* free page tables */
	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

	if (intel_iommu_strict) {
		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
				      last_pfn - start_pfn + 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap to save the roughly
		 * 1/6 of the CPU time otherwise consumed by the iotlb
		 * flush operation.
		 */
	}
}

static int intel_nontranslate_map_sg(struct device *hwdev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;
}

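/*
 * intel_map_sg - dma_map_ops ->map_sg callback.
 *
 * Allocates a single IOVA range large enough for the whole
 * scatterlist and maps every segment into it with
 * domain_sg_mapping().  On hardware with caching mode the new
 * (non-present -> present) entries must be flushed from the IOTLB;
 * otherwise flushing the write buffer suffices.
 */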
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_vpfn;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (iommu_no_mapping(hwdev))
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i)
		size += aligned_nrpages(sg->offset, sg->length);

	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
				pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write only
	 * mappings..
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
	    !cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);

	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
	if (unlikely(ret)) {
		/* clear the page */
		dma_pte_clear_range(domain, start_vpfn,
				    start_vpfn + size - 1);
		/* free page tables */
		dma_pte_free_pagetable(domain, start_vpfn,
				       start_vpfn + size - 1);
		/* free iova */
		__free_iova(&domain->iovad, iova);
		return 0;
	}

	/* it's a non-present to present mapping. Only flush if caching mode */
	if (cap_caching_mode(iommu->cap))
		iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
	else
		iommu_flush_write_buffer(iommu);

	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

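/*
 * Slab caches for the dmar_domain, device_domain_info and struct iova
 * objects that are allocated and freed throughout this driver.
 */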
static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					       sizeof(struct dmar_domain),
					       0,
					       SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_devinfo_cache_init(void)
{
	int ret = 0;

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		printk(KERN_ERR "Couldn't create devinfo cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

static inline int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

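/*
 * iommu_init_mempool - create all three caches, unwinding in reverse
 * order if any of them fails.
 */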
static int __init iommu_init_mempool(void)
{
	int ret;
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	ret = iommu_domain_cache_init();
	if (ret)
		goto domain_error;

	ret = iommu_devinfo_cache_init();
	if (!ret)
		return ret;

	kmem_cache_destroy(iommu_domain_cache);
domain_error:
	kmem_cache_destroy(iommu_iova_cache);

	return -ENOMEM;
}

static void __init iommu_exit_mempool(void)
{
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(iommu_domain_cache);
	kmem_cache_destroy(iommu_iova_cache);
}

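/*
 * init_no_remapping_devices - mark DMAR units that can be ignored.
 *
 * A unit with no PCI devices behind it is ignored outright.  If
 * dmar_map_gfx is clear, units that cover only graphics devices are
 * also ignored and their devices are tagged with
 * DUMMY_DEVICE_DOMAIN_INFO so that DMA for them is left untranslated.
 */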
static void __init init_no_remapping_devices(void)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		if (!drhd->include_all) {
			int i;
			for (i = 0; i < drhd->devices_cnt; i++)
				if (drhd->devices[i] != NULL)
					break;
			/* ignore DMAR unit if no pci devices exist */
			if (i == drhd->devices_cnt)
				drhd->ignored = 1;
		}
	}

	if (dmar_map_gfx)
		return;

	for_each_drhd_unit(drhd) {
		int i;
		if (drhd->ignored || drhd->include_all)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++)
			if (drhd->devices[i] &&
			    !IS_GFX_DEVICE(drhd->devices[i]))
				break;

		if (i < drhd->devices_cnt)
			continue;

		/* bypass IOMMU if it is just for gfx devices */
		drhd->ignored = 1;
		for (i = 0; i < drhd->devices_cnt; i++) {
			if (!drhd->devices[i])
				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
		}
	}
}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	for_each_active_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	for_each_active_iommu(iommu, drhd) {
		iommu_flush_write_buffer(iommu);

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
}

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}

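/*
 * iommu_suspend - save the fault-event registers of every active
 * IOMMU and disable translation before the system sleeps;
 * iommu_resume() below restores them after init_iommu_hw() has
 * re-enabled the hardware.
 */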
static int iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	for_each_active_iommu(iommu, drhd) {
		iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
					     GFP_ATOMIC);
		if (!iommu->iommu_state)
			goto nomem;
	}

	iommu_flush_all();

	for_each_active_iommu(iommu, drhd) {
		iommu_disable_translation(iommu);

		spin_lock_irqsave(&iommu->register_lock, flag);

		iommu->iommu_state[SR_DMAR_FECTL_REG] =
			readl(iommu->reg + DMAR_FECTL_REG);
		iommu->iommu_state[SR_DMAR_FEDATA_REG] =
			readl(iommu->reg + DMAR_FEDATA_REG);
		iommu->iommu_state[SR_DMAR_FEADDR_REG] =
			readl(iommu->reg + DMAR_FEADDR_REG);
		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
			readl(iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}
	return 0;

nomem:
	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return -ENOMEM;
}

static int iommu_resume(struct sys_device *dev)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	unsigned long flag;

	if (init_iommu_hw()) {
		WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
		return -EIO;
	}

	for_each_active_iommu(iommu, drhd) {

		spin_lock_irqsave(&iommu->register_lock, flag);

		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
		       iommu->reg + DMAR_FECTL_REG);
		writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
		       iommu->reg + DMAR_FEDATA_REG);
		writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
		       iommu->reg + DMAR_FEADDR_REG);
		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
		       iommu->reg + DMAR_FEUADDR_REG);

		spin_unlock_irqrestore(&iommu->register_lock, flag);
	}

	for_each_active_iommu(iommu, drhd)
		kfree(iommu->iommu_state);

	return 0;
}

static struct sysdev_class iommu_sysclass = {
	.name = "iommu",
	.resume = iommu_resume,
	.suspend = iommu_suspend,
};

static struct sys_device device_iommu = {
	.cls = &iommu_sysclass,
};

static int __init init_iommu_sysfs(void)
{
	int error;

	error = sysdev_class_register(&iommu_sysclass);
	if (error)
		return error;

	error = sysdev_register(&device_iommu);
	if (error)
		sysdev_class_unregister(&iommu_sysclass);

	return error;
}

#else
static int __init init_iommu_sysfs(void)
{
	return 0;
}
#endif	/* CONFIG_SUSPEND */

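/*
 * intel_iommu_init - main entry point, called during PCI/IOMMU init.
 *
 * Parses the DMAR table and device scopes, brings up the remapping
 * hardware via init_dmars(), installs intel_dma_ops as the system
 * dma_map_ops, and registers intel_iommu_ops with the generic IOMMU
 * API.  Failures are fatal when tboot required VT-d for a measured
 * launch.
 */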
int __init intel_iommu_init(void)
{
	int ret = 0;
	int force_on = 0;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		return -ENODEV;
	}

	if (dmar_dev_scope_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		return -ENODEV;
	}

	/*
	 * Check the need for DMA-remapping initialization now.
	 * Above initialization will also be used by Interrupt-remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	init_iommu_sysfs();

	register_iommu(&intel_iommu_ops);

	return 0;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
					   struct pci_dev *pdev)
{
	struct pci_dev *tmp, *parent;

	if (!iommu || !pdev)
		return;

	/* dependent device detach */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	/* Secondary interface's bus number and devfn 0 */
	if (tmp) {
		parent = pdev->bus->self;
		while (parent != tmp) {
			iommu_detach_dev(iommu, parent->bus->number,
					 parent->devfn);
			parent = parent->bus->self;
		}
		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
			iommu_detach_dev(iommu,
				tmp->subordinate->number, 0);
		else /* this is a legacy PCI bridge */
			iommu_detach_dev(iommu, tmp->bus->number,
					 tmp->devfn);
	}
}

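/*
 * domain_remove_one_dev_info - detach a single device from a domain.
 *
 * Removes the device_domain_info for @pdev, detaches the device (and
 * any bridges it sits behind) from the IOMMU, and, if no other device
 * on the same IOMMU remains in the domain, drops that IOMMU from the
 * domain's bitmap and recomputes the domain's capabilities.
 */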
static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags;
	int found = 0;
	struct list_head *entry, *tmp;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_safe(entry, tmp, &domain->devices) {
		info = list_entry(entry, struct device_domain_info, link);
		/* No need to compare PCI domain; it has to be the same */
		if (info->bus == pdev->bus->number &&
		    info->devfn == pdev->devfn) {
			list_del(&info->link);
			list_del(&info->global);
			if (info->dev)
				info->dev->dev.archdata.iommu = NULL;
			spin_unlock_irqrestore(&device_domain_lock, flags);

			iommu_disable_dev_iotlb(info);
			iommu_detach_dev(iommu, info->bus, info->devfn);
			iommu_detach_dependent_devices(iommu, pdev);
			free_devinfo_mem(info);

			spin_lock_irqsave(&device_domain_lock, flags);

			if (found)
				break;
			else
				continue;
		}

		/* if there are no other devices under the same iommu
		 * owned by this domain, clear this iommu in iommu_bmp,
		 * update iommu count and coherency
		 */
		if (iommu == device_to_iommu(info->segment, info->bus,
					     info->devfn))
			found = 1;
	}

	if (found == 0) {
		unsigned long tmp_flags;
		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		domain->iommu_count--;
		domain_update_iommu_cap(domain);
		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
	}

	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	struct intel_iommu *iommu;
	unsigned long flags1, flags2;

	spin_lock_irqsave(&device_domain_lock, flags1);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;

		spin_unlock_irqrestore(&device_domain_lock, flags1);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		iommu_detach_dependent_devices(iommu, info->dev);

		/* clear this iommu in iommu_bmp, update iommu count
		 * and capabilities
		 */
		spin_lock_irqsave(&domain->iommu_lock, flags2);
		if (test_and_clear_bit(iommu->seq_id,
				       &domain->iommu_bmp)) {
			domain->iommu_count--;
			domain_update_iommu_cap(domain);
		}
		spin_unlock_irqrestore(&domain->iommu_lock, flags2);

		free_devinfo_mem(info);
		spin_lock_irqsave(&device_domain_lock, flags1);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags1);
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;

static int vm_domain_min_agaw(struct dmar_domain *domain)
{
	int i;
	int min_agaw = domain->agaw;

	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	for (; i < g_num_of_iommus; ) {
		if (min_agaw > g_iommus[i]->agaw)
			min_agaw = g_iommus[i]->agaw;

		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
	}

	return min_agaw;
}

static struct dmar_domain *iommu_alloc_vm_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->id = vm_domid++;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

	return domain;
}

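/*
 * md_domain_init - initialise a domain created on behalf of a virtual
 * machine: iova allocator, reserved ranges, address width and the
 * top-level page directory.  No specific IOMMU is attached yet; that
 * happens when a device is assigned to the domain.
 */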
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	INIT_LIST_HEAD(&domain->devices);

	domain->iommu_count = 0;
	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static void iommu_free_vm_domain(struct dmar_domain *domain)
{
	unsigned long flags;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long i;
	unsigned long ndomains;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		ndomains = cap_ndoms(iommu->cap);
		i = find_first_bit(iommu->domain_ids, ndomains);
		for (; i < ndomains; ) {
			if (iommu->domains[i] == domain) {
				spin_lock_irqsave(&iommu->lock, flags);
				clear_bit(i, iommu->domain_ids);
				iommu->domains[i] = NULL;
				spin_unlock_irqrestore(&iommu->lock, flags);
				break;
			}
			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
		}
	}
}

static void vm_domain_exit(struct dmar_domain *domain)
{
	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	vm_domain_remove_all_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	iommu_free_vm_domain(domain);
	free_domain_mem(domain);
}

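/*
 * Callbacks for the generic IOMMU API (include/linux/iommu.h), used
 * by KVM device assignment.  They wrap the VM-domain helpers above
 * in the iommu_domain interface registered at the bottom of this
 * file.
 */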
static int intel_iommu_domain_init(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain;

	dmar_domain = iommu_alloc_vm_domain();
	if (!dmar_domain) {
		printk(KERN_ERR
		       "intel_iommu_domain_init: dmar_domain == NULL\n");
		return -ENOMEM;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
		       "intel_iommu_domain_init() failed\n");
		vm_domain_exit(dmar_domain);
		return -ENOMEM;
	}
	domain->priv = dmar_domain;

	return 0;
}

static void intel_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = domain->priv;

	domain->priv = NULL;
	vm_domain_exit(dmar_domain);
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_iommu *iommu;
	int addr_width;
	u64 end;

	/* normally pdev is not mapped */
	if (unlikely(domain_context_mapped(pdev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(pdev);
		if (old_domain) {
			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
			    dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
				domain_remove_one_dev_info(old_domain, pdev);
			else
				domain_remove_dev_info(old_domain);
		}
	}

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	end = DOMAIN_MAX_ADDR(addr_width);
	end = end & VTD_PAGE_MASK;
	if (end < dmar_domain->max_addr) {
		printk(KERN_ERR "%s: iommu agaw (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, iommu->agaw, dmar_domain->max_addr);
		return -EFAULT;
	}

	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct pci_dev *pdev = to_pci_dev(dev);

	domain_remove_one_dev_info(dmar_domain, pdev);
}

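/*
 * intel_iommu_map_range - map a run of guest-physical pages.
 *
 * Translates IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE into DMA PTE bits,
 * grows the domain's max_addr (checking that the smallest AGAW among
 * the attached IOMMUs can still reach it) and installs the PTEs with
 * domain_pfn_mapping().
 */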
static int intel_iommu_map_range(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t hpa,
				 size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int addr_width;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		int min_agaw;
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		min_agaw = vm_domain_min_agaw(dmar_domain);
		addr_width = agaw_to_width(min_agaw);
		end = DOMAIN_MAX_ADDR(addr_width);
		end = end & VTD_PAGE_MASK;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu agaw (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, min_agaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

static void intel_iommu_unmap_range(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (!size)
		return;

	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
			    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;

	return 0;
}

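/*
 * The ops table below plugs the functions above into the generic
 * IOMMU API.  A caller such as KVM would drive them roughly like
 * this (a minimal sketch, error handling omitted):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */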
static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map_range,
	.unmap = intel_iommu_unmap_range,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}