/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

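/*
 * State for deferred (batched) IOTLB flushing: unmapped IOVAs are queued
 * on unmaps_to_do and released in bulk from unmap_timer, unless
 * intel_iommu_strict disables batching.
 */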
#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_DMAR_DEFAULT_ON*/

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus: start from a default agaw and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
}

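/*
 * Find the IOMMU unit that claims the device identified by segment/bus/devfn:
 * either listed explicitly in a DRHD device scope, reachable behind a listed
 * bridge, or covered by an include_all unit. Returns NULL if none matches.
 */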
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

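/*
 * Walk the page table of @domain down to the last level for @pfn, allocating
 * intermediate page-table pages as needed (racing allocations are resolved
 * with cmpxchg64), and return the level-1 PTE.
 */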
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (level == 1)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte))
			break;
		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, 2);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn++;
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

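/*
 * Page-selective IOTLB invalidation for @pages pages starting at @pfn,
 * falling back to a domain-selective flush when PSI is unsupported or the
 * range is too large; device IOTLBs are flushed as well where required.
 */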
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		set_irq_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

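/* Allocate a free domain id on @iommu for @domain and link the two. */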
static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

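/*
 * Reserve IOVA ranges that must never be handed out for DMA: the IOAPIC
 * MMIO window and every PCI device's MMIO resources (so peer-to-peer
 * accesses never hit them).
 */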
static void dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
			    IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova)
		printk(KERN_ERR "Reserve IOAPIC range failed\n");

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova)
				printk(KERN_ERR "Reserve iova failed\n");
		}
	}
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

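/*
 * Initialize a freshly allocated dmar_domain: set up its iova allocator,
 * pick an address width (agaw) supported by the iommu, record coherency
 * and snooping capabilities, and allocate the top-level page directory.
 */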
1356static int domain_init(struct dmar_domain *domain, int guest_width)
1357{
1358 struct intel_iommu *iommu;
1359 int adjust_width, agaw;
1360 unsigned long sagaw;
1361
David Millerf6611972008-02-06 01:36:23 -08001362 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001363 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001364
1365 domain_reserve_special_ranges(domain);
1366
1367 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001368 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001369 if (guest_width > cap_mgaw(iommu->cap))
1370 guest_width = cap_mgaw(iommu->cap);
1371 domain->gaw = guest_width;
1372 adjust_width = guestwidth_to_adjustwidth(guest_width);
1373 agaw = width_to_agaw(adjust_width);
1374 sagaw = cap_sagaw(iommu->cap);
1375 if (!test_bit(agaw, &sagaw)) {
1376 /* hardware doesn't support it, choose a bigger one */
1377 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1378 agaw = find_next_bit(&sagaw, 5, agaw);
1379 if (agaw >= 5)
1380 return -ENODEV;
1381 }
1382 domain->agaw = agaw;
1383 INIT_LIST_HEAD(&domain->devices);
1384
Weidong Han8e6040972008-12-08 15:49:06 +08001385 if (ecap_coherent(iommu->ecap))
1386 domain->iommu_coherency = 1;
1387 else
1388 domain->iommu_coherency = 0;
1389
Sheng Yang58c610b2009-03-18 15:33:05 +08001390 if (ecap_sc_support(iommu->ecap))
1391 domain->iommu_snooping = 1;
1392 else
1393 domain->iommu_snooping = 0;
1394
Weidong Hanc7151a82008-12-08 22:51:37 +08001395 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001396 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001397
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001398 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001399 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001400 if (!domain->pgd)
1401 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001402 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001403 return 0;
1404}
1405
1406static void domain_exit(struct dmar_domain *domain)
1407{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001408 struct dmar_drhd_unit *drhd;
1409 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001410
1411 /* Domain 0 is reserved, so dont process it */
1412 if (!domain)
1413 return;
1414
1415 domain_remove_dev_info(domain);
1416 /* destroy iovas */
1417 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001418
1419 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001420 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001421
1422 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001423 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001425 for_each_active_iommu(iommu, drhd)
1426 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1427 iommu_detach_domain(domain, iommu);
1428
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001429 free_domain_mem(domain);
1430}
1431
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001432static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1433 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001434{
1435 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001436 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001437 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001438 struct dma_pte *pgd;
1439 unsigned long num;
1440 unsigned long ndomains;
1441 int id;
1442 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001443 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001444
1445 pr_debug("Set context mapping for %02x:%02x.%d\n",
1446 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001447
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001448 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001449 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1450 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001451
David Woodhouse276dbf992009-04-04 01:45:37 +01001452 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001453 if (!iommu)
1454 return -ENODEV;
1455
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001456 context = device_to_context_entry(iommu, bus, devfn);
1457 if (!context)
1458 return -ENOMEM;
1459 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001460 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001461 spin_unlock_irqrestore(&iommu->lock, flags);
1462 return 0;
1463 }
1464
Weidong Hanea6606b2008-12-08 23:08:15 +08001465 id = domain->id;
1466 pgd = domain->pgd;
1467
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001468 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1469 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001470 int found = 0;
1471
1472 /* find an available domain id for this device in iommu */
1473 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001474 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001475 if (iommu->domains[num] == domain) {
1476 id = num;
1477 found = 1;
1478 break;
1479 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001480 }
1481
1482 if (found == 0) {
1483 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1484 if (num >= ndomains) {
1485 spin_unlock_irqrestore(&iommu->lock, flags);
1486 printk(KERN_ERR "IOMMU: no free domain ids\n");
1487 return -EFAULT;
1488 }
1489
1490 set_bit(num, iommu->domain_ids);
1491 iommu->domains[num] = domain;
1492 id = num;
1493 }
1494
1495 /* Skip top levels of page tables for
1496	 * iommus which have less agaw than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001497 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001498 */
Chris Wright1672af12009-12-02 12:06:34 -08001499 if (translation != CONTEXT_TT_PASS_THROUGH) {
1500 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1501 pgd = phys_to_virt(dma_pte_addr(pgd));
1502 if (!dma_pte_present(pgd)) {
1503 spin_unlock_irqrestore(&iommu->lock, flags);
1504 return -ENOMEM;
1505 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001506 }
1507 }
1508 }
1509
1510 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001511
Yu Zhao93a23a72009-05-18 13:51:37 +08001512 if (translation != CONTEXT_TT_PASS_THROUGH) {
1513 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1514 translation = info ? CONTEXT_TT_DEV_IOTLB :
1515 CONTEXT_TT_MULTI_LEVEL;
1516 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001517 /*
1518 * In pass through mode, AW must be programmed to indicate the largest
1519 * AGAW value supported by hardware. And ASR is ignored by hardware.
1520 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001521 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001522 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001523 else {
1524 context_set_address_root(context, virt_to_phys(pgd));
1525 context_set_address_width(context, iommu->agaw);
1526 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001527
1528 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001529 context_set_fault_enable(context);
1530 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001531 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001532
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001533 /*
1534 * It's a non-present to present mapping. If hardware doesn't cache
1535	 * non-present entries we only need to flush the write-buffer. If the
1536	 * hardware _does_ cache non-present entries, then it does so in the special
1537 * domain #0, which we have to flush:
1538 */
1539 if (cap_caching_mode(iommu->cap)) {
1540 iommu->flush.flush_context(iommu, 0,
1541 (((u16)bus) << 8) | devfn,
1542 DMA_CCMD_MASK_NOBIT,
1543 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001544 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001545 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001546 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001547 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001548 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001549 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001550
1551 spin_lock_irqsave(&domain->iommu_lock, flags);
1552 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1553 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001554 if (domain->iommu_count == 1)
1555 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001556 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001557 }
1558 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001559 return 0;
1560}
1561
1562static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001563domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1564 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001565{
1566 int ret;
1567 struct pci_dev *tmp, *parent;
1568
David Woodhouse276dbf992009-04-04 01:45:37 +01001569 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001570 pdev->bus->number, pdev->devfn,
1571 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001572 if (ret)
1573 return ret;
1574
1575 /* dependent device mapping */
1576 tmp = pci_find_upstream_pcie_bridge(pdev);
1577 if (!tmp)
1578 return 0;
1579 /* Secondary interface's bus number and devfn 0 */
1580 parent = pdev->bus->self;
1581 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001582 ret = domain_context_mapping_one(domain,
1583 pci_domain_nr(parent->bus),
1584 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001585 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001586 if (ret)
1587 return ret;
1588 parent = parent->bus->self;
1589 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001590 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001591 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001592 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001593 tmp->subordinate->number, 0,
1594 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001595 else /* this is a legacy PCI bridge */
1596 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001597 pci_domain_nr(tmp->bus),
1598 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001599 tmp->devfn,
1600 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001601}
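/*
 * Example of the walk above (bus numbers purely illustrative): for a
 * conventional PCI device 0000:05:01.0 behind a PCIe-to-PCI bridge,
 * a context entry is programmed for the device itself, for every
 * bridge between it and that PCIe-to-PCI bridge, and finally for
 * (secondary bus, devfn 0) of the bridge, which is typically the
 * source-id carried by requests it forwards upstream; a legacy PCI
 * bridge is instead mapped with its own bus number and devfn.
 */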
1602
Weidong Han5331fe62008-12-08 23:00:00 +08001603static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001604{
1605 int ret;
1606 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001607 struct intel_iommu *iommu;
1608
David Woodhouse276dbf992009-04-04 01:45:37 +01001609 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1610 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001611 if (!iommu)
1612 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001613
David Woodhouse276dbf992009-04-04 01:45:37 +01001614 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001615 if (!ret)
1616 return ret;
1617 /* dependent device mapping */
1618 tmp = pci_find_upstream_pcie_bridge(pdev);
1619 if (!tmp)
1620 return ret;
1621 /* Secondary interface's bus number and devfn 0 */
1622 parent = pdev->bus->self;
1623 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001624 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001625 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001626 if (!ret)
1627 return ret;
1628 parent = parent->bus->self;
1629 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001630 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001631 return device_context_mapped(iommu, tmp->subordinate->number,
1632 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001633 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001634 return device_context_mapped(iommu, tmp->bus->number,
1635 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001636}
1637
Fenghua Yuf5329592009-08-04 15:09:37 -07001638/* Returns a number of VTD pages, but aligned to MM page size */
1639static inline unsigned long aligned_nrpages(unsigned long host_addr,
1640 size_t size)
1641{
1642 host_addr &= ~PAGE_MASK;
1643 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1644}
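/*
 * Example, assuming 4KiB pages on both the MM and the VT-d side: a
 * 0x2001-byte buffer starting at offset 0xffc within its page covers
 * 0xffc + 0x2001 = 0x2ffd bytes from the page boundary, which
 * PAGE_ALIGN rounds up to 0x3000, i.e. three VT-d pages.
 */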
1645
David Woodhouse9051aa02009-06-29 12:30:54 +01001646static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1647 struct scatterlist *sg, unsigned long phys_pfn,
1648 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001649{
1650 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001651 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001652 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001653 unsigned long sg_res;
David Woodhousee1605492009-06-29 11:17:38 +01001654
1655 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1656
1657 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1658 return -EINVAL;
1659
1660 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1661
David Woodhouse9051aa02009-06-29 12:30:54 +01001662 if (sg)
1663 sg_res = 0;
1664 else {
1665 sg_res = nr_pages + 1;
1666 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1667 }
1668
David Woodhousee1605492009-06-29 11:17:38 +01001669 while (nr_pages--) {
David Woodhousec85994e2009-07-01 19:21:24 +01001670 uint64_t tmp;
1671
David Woodhousee1605492009-06-29 11:17:38 +01001672 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001673 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001674 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1675 sg->dma_length = sg->length;
1676 pteval = page_to_phys(sg_page(sg)) | prot;
1677 }
1678 if (!pte) {
1679 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1680 if (!pte)
1681 return -ENOMEM;
1682 }
1683	/* We don't need the lock here, nobody else
1684 * touches the iova range
1685 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001686 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001687 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001688 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001689 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1690 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001691 if (dumps) {
1692 dumps--;
1693 debug_dma_dump_mappings(NULL);
1694 }
1695 WARN_ON(1);
1696 }
David Woodhousee1605492009-06-29 11:17:38 +01001697 pte++;
David Woodhouse75e6bf92009-07-02 11:21:16 +01001698 if (!nr_pages || first_pte_in_page(pte)) {
David Woodhousee1605492009-06-29 11:17:38 +01001699 domain_flush_cache(domain, first_pte,
1700 (void *)pte - (void *)first_pte);
1701 pte = NULL;
1702 }
1703 iov_pfn++;
1704 pteval += VTD_PAGE_SIZE;
1705 sg_res--;
1706 if (!sg_res)
1707 sg = sg_next(sg);
1708 }
1709 return 0;
1710}
1711
David Woodhouse9051aa02009-06-29 12:30:54 +01001712static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1713 struct scatterlist *sg, unsigned long nr_pages,
1714 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001715{
David Woodhouse9051aa02009-06-29 12:30:54 +01001716 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1717}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001718
David Woodhouse9051aa02009-06-29 12:30:54 +01001719static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1720 unsigned long phys_pfn, unsigned long nr_pages,
1721 int prot)
1722{
1723 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001724}
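/*
 * Both wrappers funnel into __domain_mapping(): domain_sg_mapping()
 * walks a scatterlist, domain_pfn_mapping() maps a physically
 * contiguous range.  A minimal sketch of the latter, mapping three
 * pages read/write at IOVA pfn 0x1000 (all values illustrative only):
 *
 *	ret = domain_pfn_mapping(domain, 0x1000, phys_pfn, 3,
 *				 DMA_PTE_READ | DMA_PTE_WRITE);
 */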
1725
Weidong Hanc7151a82008-12-08 22:51:37 +08001726static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001727{
Weidong Hanc7151a82008-12-08 22:51:37 +08001728 if (!iommu)
1729 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001730
1731 clear_context_table(iommu, bus, devfn);
1732 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001733 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001734 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001735}
1736
1737static void domain_remove_dev_info(struct dmar_domain *domain)
1738{
1739 struct device_domain_info *info;
1740 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001741 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001742
1743 spin_lock_irqsave(&device_domain_lock, flags);
1744 while (!list_empty(&domain->devices)) {
1745 info = list_entry(domain->devices.next,
1746 struct device_domain_info, link);
1747 list_del(&info->link);
1748 list_del(&info->global);
1749 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001750 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001751 spin_unlock_irqrestore(&device_domain_lock, flags);
1752
Yu Zhao93a23a72009-05-18 13:51:37 +08001753 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001754 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001755 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001756 free_devinfo_mem(info);
1757
1758 spin_lock_irqsave(&device_domain_lock, flags);
1759 }
1760 spin_unlock_irqrestore(&device_domain_lock, flags);
1761}
1762
1763/*
1764 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001765 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001766 */
Kay, Allen M38717942008-09-09 18:37:29 +03001767static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001768find_domain(struct pci_dev *pdev)
1769{
1770 struct device_domain_info *info;
1771
1772 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001773 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001774 if (info)
1775 return info->domain;
1776 return NULL;
1777}
1778
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001779/* domain is initialized */
1780static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1781{
1782 struct dmar_domain *domain, *found = NULL;
1783 struct intel_iommu *iommu;
1784 struct dmar_drhd_unit *drhd;
1785 struct device_domain_info *info, *tmp;
1786 struct pci_dev *dev_tmp;
1787 unsigned long flags;
1788 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001789 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001790 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001791
1792 domain = find_domain(pdev);
1793 if (domain)
1794 return domain;
1795
David Woodhouse276dbf992009-04-04 01:45:37 +01001796 segment = pci_domain_nr(pdev->bus);
1797
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001798 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1799 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001800 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001801 bus = dev_tmp->subordinate->number;
1802 devfn = 0;
1803 } else {
1804 bus = dev_tmp->bus->number;
1805 devfn = dev_tmp->devfn;
1806 }
1807 spin_lock_irqsave(&device_domain_lock, flags);
1808 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001809 if (info->segment == segment &&
1810 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001811 found = info->domain;
1812 break;
1813 }
1814 }
1815 spin_unlock_irqrestore(&device_domain_lock, flags);
1816	/* pcie-pci bridge already has a domain, use it */
1817 if (found) {
1818 domain = found;
1819 goto found_domain;
1820 }
1821 }
1822
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001823 domain = alloc_domain();
1824 if (!domain)
1825 goto error;
1826
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001827 /* Allocate new domain for the device */
1828 drhd = dmar_find_matched_drhd_unit(pdev);
1829 if (!drhd) {
1830 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1831 pci_name(pdev));
1832 return NULL;
1833 }
1834 iommu = drhd->iommu;
1835
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001836 ret = iommu_attach_domain(domain, iommu);
1837 if (ret) {
1838 domain_exit(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001839 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001840 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001841
1842 if (domain_init(domain, gaw)) {
1843 domain_exit(domain);
1844 goto error;
1845 }
1846
1847 /* register pcie-to-pci device */
1848 if (dev_tmp) {
1849 info = alloc_devinfo_mem();
1850 if (!info) {
1851 domain_exit(domain);
1852 goto error;
1853 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001854 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001855 info->bus = bus;
1856 info->devfn = devfn;
1857 info->dev = NULL;
1858 info->domain = domain;
1859 /* This domain is shared by devices under p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08001860 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001861
1862	/* pcie-to-pci bridge already has a domain, use it */
1863 found = NULL;
1864 spin_lock_irqsave(&device_domain_lock, flags);
1865 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001866 if (tmp->segment == segment &&
1867 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001868 found = tmp->domain;
1869 break;
1870 }
1871 }
1872 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02001873 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001874 free_devinfo_mem(info);
1875 domain_exit(domain);
1876 domain = found;
1877 } else {
1878 list_add(&info->link, &domain->devices);
1879 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02001880 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001881 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001882 }
1883
1884found_domain:
1885 info = alloc_devinfo_mem();
1886 if (!info)
1887 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01001888 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001889 info->bus = pdev->bus->number;
1890 info->devfn = pdev->devfn;
1891 info->dev = pdev;
1892 info->domain = domain;
1893 spin_lock_irqsave(&device_domain_lock, flags);
1894 /* somebody is fast */
1895 found = find_domain(pdev);
1896 if (found != NULL) {
1897 spin_unlock_irqrestore(&device_domain_lock, flags);
1898 if (found != domain) {
1899 domain_exit(domain);
1900 domain = found;
1901 }
1902 free_devinfo_mem(info);
1903 return domain;
1904 }
1905 list_add(&info->link, &domain->devices);
1906 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001907 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001908 spin_unlock_irqrestore(&device_domain_lock, flags);
1909 return domain;
1910error:
1911 /* recheck it here, maybe others set it */
1912 return find_domain(pdev);
1913}
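/*
 * Note on the "somebody is fast" recheck above: the domain is built
 * without holding device_domain_lock, so another CPU handling the same
 * device (or a sibling behind the same bridge) may have installed a
 * domain meanwhile.  If so, the freshly built domain is torn down and
 * the existing one is returned instead.
 */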
1914
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001915static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07001916#define IDENTMAP_ALL 1
1917#define IDENTMAP_GFX 2
1918#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001919
David Woodhouseb2132032009-06-26 18:50:28 +01001920static int iommu_domain_identity_map(struct dmar_domain *domain,
1921 unsigned long long start,
1922 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001923{
David Woodhousec5395d52009-06-28 16:35:56 +01001924 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1925 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001926
David Woodhousec5395d52009-06-28 16:35:56 +01001927 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1928 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001929 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01001930 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001931 }
1932
David Woodhousec5395d52009-06-28 16:35:56 +01001933 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1934 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001935 /*
1936 * RMRR range might have overlap with physical memory range,
1937 * clear it first
1938 */
David Woodhousec5395d52009-06-28 16:35:56 +01001939 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001940
David Woodhousec5395d52009-06-28 16:35:56 +01001941 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1942 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01001943 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01001944}
1945
1946static int iommu_prepare_identity_map(struct pci_dev *pdev,
1947 unsigned long long start,
1948 unsigned long long end)
1949{
1950 struct dmar_domain *domain;
1951 int ret;
1952
David Woodhousec7ab48d2009-06-26 19:10:36 +01001953 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01001954 if (!domain)
1955 return -ENOMEM;
1956
David Woodhouse19943b02009-08-04 16:19:20 +01001957 /* For _hardware_ passthrough, don't bother. But for software
1958 passthrough, we do it anyway -- it may indicate a memory
1959 range which is reserved in E820, so which didn't get set
1960 up to start with in si_domain */
1961 if (domain == si_domain && hw_pass_through) {
1962	printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1963 pci_name(pdev), start, end);
1964 return 0;
1965 }
1966
1967 printk(KERN_INFO
1968 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1969 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01001970
David Woodhouse5595b522009-12-02 09:21:55 +00001971 if (end < start) {
1972 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
1973 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1974 dmi_get_system_info(DMI_BIOS_VENDOR),
1975 dmi_get_system_info(DMI_BIOS_VERSION),
1976 dmi_get_system_info(DMI_PRODUCT_VERSION));
1977 ret = -EIO;
1978 goto error;
1979 }
1980
David Woodhouse2ff729f2009-08-26 14:25:41 +01001981 if (end >> agaw_to_width(domain->agaw)) {
1982 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1983 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1984 agaw_to_width(domain->agaw),
1985 dmi_get_system_info(DMI_BIOS_VENDOR),
1986 dmi_get_system_info(DMI_BIOS_VERSION),
1987 dmi_get_system_info(DMI_PRODUCT_VERSION));
1988 ret = -EIO;
1989 goto error;
1990 }
David Woodhouse19943b02009-08-04 16:19:20 +01001991
David Woodhouseb2132032009-06-26 18:50:28 +01001992 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001993 if (ret)
1994 goto error;
1995
1996 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001997 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01001998 if (ret)
1999 goto error;
2000
2001 return 0;
2002
2003 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002004 domain_exit(domain);
2005 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002006}
2007
2008static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2009 struct pci_dev *pdev)
2010{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002011 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002012 return 0;
2013 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2014 rmrr->end_address + 1);
2015}
2016
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002017#ifdef CONFIG_DMAR_FLOPPY_WA
2018static inline void iommu_prepare_isa(void)
2019{
2020 struct pci_dev *pdev;
2021 int ret;
2022
2023 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2024 if (!pdev)
2025 return;
2026
David Woodhousec7ab48d2009-06-26 19:10:36 +01002027 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002028 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2029
2030 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002031 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2032 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002033
2034}
2035#else
2036static inline void iommu_prepare_isa(void)
2037{
2038 return;
2039}
2040#endif /* !CONFIG_DMAR_FLOPPY_WA */
2041
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002042static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002043
2044static int __init si_domain_work_fn(unsigned long start_pfn,
2045 unsigned long end_pfn, void *datax)
2046{
2047 int *ret = datax;
2048
2049 *ret = iommu_domain_identity_map(si_domain,
2050 (uint64_t)start_pfn << PAGE_SHIFT,
2051 (uint64_t)end_pfn << PAGE_SHIFT);
2052 return *ret;
2053
2054}
2055
Matt Kraai071e1372009-08-23 22:30:22 -07002056static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002057{
2058 struct dmar_drhd_unit *drhd;
2059 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002060 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002061
2062 si_domain = alloc_domain();
2063 if (!si_domain)
2064 return -EFAULT;
2065
David Woodhousec7ab48d2009-06-26 19:10:36 +01002066 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002067
2068 for_each_active_iommu(iommu, drhd) {
2069 ret = iommu_attach_domain(si_domain, iommu);
2070 if (ret) {
2071 domain_exit(si_domain);
2072 return -EFAULT;
2073 }
2074 }
2075
2076 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2077 domain_exit(si_domain);
2078 return -EFAULT;
2079 }
2080
2081 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2082
David Woodhouse19943b02009-08-04 16:19:20 +01002083 if (hw)
2084 return 0;
2085
David Woodhousec7ab48d2009-06-26 19:10:36 +01002086 for_each_online_node(nid) {
2087 work_with_active_regions(nid, si_domain_work_fn, &ret);
2088 if (ret)
2089 return ret;
2090 }
2091
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002092 return 0;
2093}
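/*
 * For hardware pass-through (hw != 0) si_domain's page tables are left
 * empty; the pass-through context entries make the IOMMU forward DMA
 * untranslated.  For software identity mapping, the loop above maps
 * every active memory region 1:1, so IOVA == physical address for all
 * usable RAM.
 */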
2094
2095static void domain_remove_one_dev_info(struct dmar_domain *domain,
2096 struct pci_dev *pdev);
2097static int identity_mapping(struct pci_dev *pdev)
2098{
2099 struct device_domain_info *info;
2100
2101 if (likely(!iommu_identity_mapping))
2102 return 0;
2103
2104
2105 list_for_each_entry(info, &si_domain->devices, link)
2106 if (info->dev == pdev)
2107 return 1;
2108 return 0;
2109}
2110
2111static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002112 struct pci_dev *pdev,
2113 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002114{
2115 struct device_domain_info *info;
2116 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002117 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002118
2119 info = alloc_devinfo_mem();
2120 if (!info)
2121 return -ENOMEM;
2122
David Woodhouse5fe60f42009-08-09 10:53:41 +01002123 ret = domain_context_mapping(domain, pdev, translation);
2124 if (ret) {
2125 free_devinfo_mem(info);
2126 return ret;
2127 }
2128
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002129 info->segment = pci_domain_nr(pdev->bus);
2130 info->bus = pdev->bus->number;
2131 info->devfn = pdev->devfn;
2132 info->dev = pdev;
2133 info->domain = domain;
2134
2135 spin_lock_irqsave(&device_domain_lock, flags);
2136 list_add(&info->link, &domain->devices);
2137 list_add(&info->global, &device_domain_list);
2138 pdev->dev.archdata.iommu = info;
2139 spin_unlock_irqrestore(&device_domain_lock, flags);
2140
2141 return 0;
2142}
2143
David Woodhouse6941af22009-07-04 18:24:27 +01002144static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2145{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002146 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2147 return 1;
2148
2149 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2150 return 1;
2151
2152 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2153 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002154
David Woodhouse3dfc8132009-07-04 19:11:08 +01002155 /*
2156 * We want to start off with all devices in the 1:1 domain, and
2157 * take them out later if we find they can't access all of memory.
2158 *
2159 * However, we can't do this for PCI devices behind bridges,
2160 * because all PCI devices behind the same bridge will end up
2161 * with the same source-id on their transactions.
2162 *
2163 * Practically speaking, we can't change things around for these
2164 * devices at run-time, because we can't be sure there'll be no
2165 * DMA transactions in flight for any of their siblings.
2166 *
2167 * So PCI devices (unless they're on the root bus) as well as
2168 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2169 * the 1:1 domain, just in _case_ one of their siblings turns out
2170 * not to be able to map all of memory.
2171 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002172 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002173 if (!pci_is_root_bus(pdev->bus))
2174 return 0;
2175 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2176 return 0;
2177 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2178 return 0;
2179
2180 /*
2181 * At boot time, we don't yet know if devices will be 64-bit capable.
2182 * Assume that they will -- if they turn out not to be, then we can
2183 * take them out of the 1:1 domain later.
2184 */
David Woodhouse6941af22009-07-04 18:24:27 +01002185 if (!startup)
2186 return pdev->dma_mask > DMA_BIT_MASK(32);
2187
2188 return 1;
2189}
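/*
 * Illustrative outcomes of the policy above: an integrated PCIe NIC on
 * the root complex with a 64-bit DMA mask goes into the 1:1 domain; a
 * conventional PCI device behind a PCIe-to-PCI bridge does not, since
 * it shares a source-id with its siblings; a device that later proves
 * to be 32-bit only is dropped back out (see iommu_no_mapping() below).
 */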
2190
Matt Kraai071e1372009-08-23 22:30:22 -07002191static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002192{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002193 struct pci_dev *pdev = NULL;
2194 int ret;
2195
David Woodhouse19943b02009-08-04 16:19:20 +01002196 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002197 if (ret)
2198 return -EFAULT;
2199
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002200 for_each_pci_dev(pdev) {
David Woodhouse6941af22009-07-04 18:24:27 +01002201 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse19943b02009-08-04 16:19:20 +01002202 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2203 hw ? "hardware" : "software", pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002204
David Woodhouse5fe60f42009-08-09 10:53:41 +01002205 ret = domain_add_dev_info(si_domain, pdev,
David Woodhouse19943b02009-08-04 16:19:20 +01002206 hw ? CONTEXT_TT_PASS_THROUGH :
David Woodhouse62edf5d2009-07-04 10:59:46 +01002207 CONTEXT_TT_MULTI_LEVEL);
2208 if (ret)
2209 return ret;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002210 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002211 }
2212
2213 return 0;
2214}
2215
2216int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002217{
2218 struct dmar_drhd_unit *drhd;
2219 struct dmar_rmrr_unit *rmrr;
2220 struct pci_dev *pdev;
2221 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002222 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002223
2224 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002225 * for each drhd
2226 * allocate root
2227 * initialize and program root entry to not present
2228 * endfor
2229 */
2230 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002231 g_num_of_iommus++;
2232 /*
2233 * lock not needed as this is only incremented in the single
2234 * threaded kernel __init code path all other access are read
2235 * only
2236 */
2237 }
2238
Weidong Hand9630fe2008-12-08 11:06:32 +08002239 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2240 GFP_KERNEL);
2241 if (!g_iommus) {
2242 printk(KERN_ERR "Allocating global iommu array failed\n");
2243 ret = -ENOMEM;
2244 goto error;
2245 }
2246
mark gross80b20dd2008-04-18 13:53:58 -07002247 deferred_flush = kzalloc(g_num_of_iommus *
2248 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2249 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002250 ret = -ENOMEM;
2251 goto error;
2252 }
2253
mark gross5e0d2a62008-03-04 15:22:08 -08002254 for_each_drhd_unit(drhd) {
2255 if (drhd->ignored)
2256 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002257
2258 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002259 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002260
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002261 ret = iommu_init_domains(iommu);
2262 if (ret)
2263 goto error;
2264
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002265 /*
2266 * TBD:
2267 * we could share the same root & context tables
2268	 * among all IOMMUs. Need to split it later.
2269 */
2270 ret = iommu_alloc_root_entry(iommu);
2271 if (ret) {
2272 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2273 goto error;
2274 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002275 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002276 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002277 }
2278
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002279 /*
2280 * Start from the sane iommu hardware state.
2281 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002282 for_each_drhd_unit(drhd) {
2283 if (drhd->ignored)
2284 continue;
2285
2286 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002287
2288 /*
2289 * If the queued invalidation is already initialized by us
2290 * (for example, while enabling interrupt-remapping) then
2291 * we got the things already rolling from a sane state.
2292 */
2293 if (iommu->qi)
2294 continue;
2295
2296 /*
2297 * Clear any previous faults.
2298 */
2299 dmar_fault(-1, iommu);
2300 /*
2301 * Disable queued invalidation if supported and already enabled
2302 * before OS handover.
2303 */
2304 dmar_disable_qi(iommu);
2305 }
2306
2307 for_each_drhd_unit(drhd) {
2308 if (drhd->ignored)
2309 continue;
2310
2311 iommu = drhd->iommu;
2312
Youquan Songa77b67d2008-10-16 16:31:56 -07002313 if (dmar_enable_qi(iommu)) {
2314 /*
2315 * Queued Invalidate not enabled, use Register Based
2316 * Invalidate
2317 */
2318 iommu->flush.flush_context = __iommu_flush_context;
2319 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002320 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002321 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002322 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002323 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002324 } else {
2325 iommu->flush.flush_context = qi_flush_context;
2326 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002327 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002328 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002329 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002330 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002331 }
2332 }
2333
David Woodhouse19943b02009-08-04 16:19:20 +01002334 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002335 iommu_identity_mapping |= IDENTMAP_ALL;
2336
David Woodhouse19943b02009-08-04 16:19:20 +01002337#ifdef CONFIG_DMAR_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002338 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002339#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002340
2341 check_tylersburg_isoch();
2342
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002343 /*
2344	 * If pass-through is not set or not enabled, set up context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002345 * identity mappings for rmrr, gfx, and isa and may fall back to static
2346 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002347 */
David Woodhouse19943b02009-08-04 16:19:20 +01002348 if (iommu_identity_mapping) {
2349 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2350 if (ret) {
2351 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2352 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002353 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002354 }
David Woodhouse19943b02009-08-04 16:19:20 +01002355 /*
2356 * For each rmrr
2357 * for each dev attached to rmrr
2358 * do
2359 * locate drhd for dev, alloc domain for dev
2360 * allocate free domain
2361 * allocate page table entries for rmrr
2362 * if context not allocated for bus
2363 * allocate and init context
2364 * set present in root table for this bus
2365 * init context with domain, translation etc
2366 * endfor
2367 * endfor
2368 */
2369 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2370 for_each_rmrr_units(rmrr) {
2371 for (i = 0; i < rmrr->devices_cnt; i++) {
2372 pdev = rmrr->devices[i];
2373 /*
2374			 * some BIOSes list non-existent devices in the DMAR
2375 * table.
2376 */
2377 if (!pdev)
2378 continue;
2379 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2380 if (ret)
2381 printk(KERN_ERR
2382 "IOMMU: mapping reserved region failed\n");
2383 }
2384 }
2385
2386 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002387
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002388 /*
2389 * for each drhd
2390 * enable fault log
2391 * global invalidate context cache
2392 * global invalidate iotlb
2393 * enable translation
2394 */
2395 for_each_drhd_unit(drhd) {
2396 if (drhd->ignored)
2397 continue;
2398 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002399
2400 iommu_flush_write_buffer(iommu);
2401
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002402 ret = dmar_set_interrupt(iommu);
2403 if (ret)
2404 goto error;
2405
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002406 iommu_set_root_entry(iommu);
2407
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002408 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002409 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002410
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002411 ret = iommu_enable_translation(iommu);
2412 if (ret)
2413 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002414
2415 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002416 }
2417
2418 return 0;
2419error:
2420 for_each_drhd_unit(drhd) {
2421 if (drhd->ignored)
2422 continue;
2423 iommu = drhd->iommu;
2424 free_iommu(iommu);
2425 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002426 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002427 return ret;
2428}
2429
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002430/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002431static struct iova *intel_alloc_iova(struct device *dev,
2432 struct dmar_domain *domain,
2433 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002434{
2435 struct pci_dev *pdev = to_pci_dev(dev);
2436 struct iova *iova = NULL;
2437
David Woodhouse875764d2009-06-28 21:20:51 +01002438 /* Restrict dma_mask to the width that the iommu can handle */
2439 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2440
2441 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002442 /*
2443 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002444 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002445 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002446 */
David Woodhouse875764d2009-06-28 21:20:51 +01002447 iova = alloc_iova(&domain->iovad, nrpages,
2448 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2449 if (iova)
2450 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002451 }
David Woodhouse875764d2009-06-28 21:20:51 +01002452 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2453 if (unlikely(!iova)) {
2454		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2455 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002456 return NULL;
2457 }
2458
2459 return iova;
2460}
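/*
 * Allocation policy: with a greater-than-32-bit dma_mask and
 * dmar_forcedac unset, the first attempt stays below 4GiB (devices
 * that nominally support 64-bit addressing do not always cope well
 * with dual-address cycles); only when that range is exhausted does
 * the allocator fall back to the full mask.
 */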
2461
David Woodhouse147202a2009-07-07 19:43:20 +01002462static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002463{
2464 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002465 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002466
2467 domain = get_domain_for_dev(pdev,
2468 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2469 if (!domain) {
2470 printk(KERN_ERR
2471			"Allocating domain for %s failed\n", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002472 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002473 }
2474
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002475 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002476 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002477 ret = domain_context_mapping(domain, pdev,
2478 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002479 if (ret) {
2480 printk(KERN_ERR
2481				"Domain context map for %s failed\n",
2482 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002483 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002484 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002485 }
2486
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002487 return domain;
2488}
2489
David Woodhouse147202a2009-07-07 19:43:20 +01002490static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2491{
2492 struct device_domain_info *info;
2493
2494 /* No lock here, assumes no domain exit in normal case */
2495 info = dev->dev.archdata.iommu;
2496 if (likely(info))
2497 return info->domain;
2498
2499 return __get_valid_domain_for_dev(dev);
2500}
2501
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002502static int iommu_dummy(struct pci_dev *pdev)
2503{
2504 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2505}
2506
2507/* Check if the pdev needs to go through non-identity map and unmap process.*/
David Woodhouse73676832009-07-04 14:08:36 +01002508static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002509{
David Woodhouse73676832009-07-04 14:08:36 +01002510 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002511 int found;
2512
David Woodhouse73676832009-07-04 14:08:36 +01002513 if (unlikely(dev->bus != &pci_bus_type))
2514 return 1;
2515
2516 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002517 if (iommu_dummy(pdev))
2518 return 1;
2519
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002520 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002521 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002522
2523 found = identity_mapping(pdev);
2524 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002525 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002526 return 1;
2527 else {
2528 /*
2529 * 32 bit DMA is removed from si_domain and fall back
2530 * to non-identity mapping.
2531 */
2532 domain_remove_one_dev_info(si_domain, pdev);
2533 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2534 pci_name(pdev));
2535 return 0;
2536 }
2537 } else {
2538 /*
2539 * In case of a detached 64 bit DMA device from vm, the device
2540 * is put into si_domain for identity mapping.
2541 */
David Woodhouse6941af22009-07-04 18:24:27 +01002542 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002543 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002544 ret = domain_add_dev_info(si_domain, pdev,
2545 hw_pass_through ?
2546 CONTEXT_TT_PASS_THROUGH :
2547 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002548 if (!ret) {
2549 printk(KERN_INFO "64bit %s uses identity mapping\n",
2550 pci_name(pdev));
2551 return 1;
2552 }
2553 }
2554 }
2555
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002556 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002557}
2558
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002559static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2560 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002561{
2562 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002563 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002564 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002565 struct iova *iova;
2566 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002567 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002568 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002569 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002570
2571 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002572
David Woodhouse73676832009-07-04 14:08:36 +01002573 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002574 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002575
2576 domain = get_valid_domain_for_dev(pdev);
2577 if (!domain)
2578 return 0;
2579
Weidong Han8c11e792008-12-08 15:29:22 +08002580 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002581 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002582
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002583 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2584 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002585 if (!iova)
2586 goto error;
2587
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002588 /*
2589 * Check if DMAR supports zero-length reads on write only
2590 * mappings..
2591 */
2592 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002593 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002594 prot |= DMA_PTE_READ;
2595 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2596 prot |= DMA_PTE_WRITE;
2597 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002598 * paddr - (paddr + size) might be partial page, we should map the whole
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002599 * page. Note: if two part of one page are separately mapped, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002600 * might have two guest_addr mapping to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002601 * is not a big problem
2602 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002603 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002604 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002605 if (ret)
2606 goto error;
2607
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002608 /* it's a non-present to present mapping. Only flush if caching mode */
2609 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002610 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002611 else
Weidong Han8c11e792008-12-08 15:29:22 +08002612 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002613
David Woodhouse03d6a242009-06-28 15:33:46 +01002614 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2615 start_paddr += paddr & ~PAGE_MASK;
2616 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002617
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002618error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002619 if (iova)
2620 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002621	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002622 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002623 return 0;
2624}
2625
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002626static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2627 unsigned long offset, size_t size,
2628 enum dma_data_direction dir,
2629 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002630{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002631 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2632 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002633}
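/*
 * Drivers never call intel_map_page() directly; it is reached through
 * the generic DMA API, e.g. dma_map_single() or dma_map_page() on a
 * device whose DMA ops point at intel_dma_ops (installed elsewhere in
 * this file), which supplies the struct device, direction and attrs
 * arguments seen above.
 */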
2634
mark gross5e0d2a62008-03-04 15:22:08 -08002635static void flush_unmaps(void)
2636{
mark gross80b20dd2008-04-18 13:53:58 -07002637 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002638
mark gross5e0d2a62008-03-04 15:22:08 -08002639 timer_on = 0;
2640
2641 /* just flush them all */
2642 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002643 struct intel_iommu *iommu = g_iommus[i];
2644 if (!iommu)
2645 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002646
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002647 if (!deferred_flush[i].next)
2648 continue;
2649
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002650 /* In caching mode, global flushes turn emulation expensive */
2651 if (!cap_caching_mode(iommu->cap))
2652 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002653 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002654 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002655 unsigned long mask;
2656 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002657 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002658
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002659 /* On real hardware multiple invalidations are expensive */
2660 if (cap_caching_mode(iommu->cap))
2661 iommu_flush_iotlb_psi(iommu, domain->id,
2662 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2663 else {
2664 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2665 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2666 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2667 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002668 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002669 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002670 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002671 }
2672
mark gross5e0d2a62008-03-04 15:22:08 -08002673 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002674}
2675
2676static void flush_unmaps_timeout(unsigned long data)
2677{
mark gross80b20dd2008-04-18 13:53:58 -07002678 unsigned long flags;
2679
2680 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002681 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002682 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002683}
2684
2685static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2686{
2687 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002688 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002689 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002690
2691 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002692 if (list_size == HIGH_WATER_MARK)
2693 flush_unmaps();
2694
Weidong Han8c11e792008-12-08 15:29:22 +08002695 iommu = domain_get_iommu(dom);
2696 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002697
mark gross80b20dd2008-04-18 13:53:58 -07002698 next = deferred_flush[iommu_id].next;
2699 deferred_flush[iommu_id].domain[next] = dom;
2700 deferred_flush[iommu_id].iova[next] = iova;
2701 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002702
2703 if (!timer_on) {
2704 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2705 timer_on = 1;
2706 }
2707 list_size++;
2708 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2709}
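/*
 * Deferred-unmap batching: freed IOVAs are parked per IOMMU in
 * deferred_flush[] and released in bulk, either once HIGH_WATER_MARK
 * entries have queued up or when the 10ms unmap_timer fires.  This
 * trades a short window of stale IOTLB entries for far fewer
 * invalidations; booting with intel_iommu=strict (intel_iommu_strict)
 * bypasses the queue and flushes synchronously in intel_unmap_page()
 * below.
 */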
2710
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002711static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2712 size_t size, enum dma_data_direction dir,
2713 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002714{
2715 struct pci_dev *pdev = to_pci_dev(dev);
2716 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002717 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002718 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002719 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002720
David Woodhouse73676832009-07-04 14:08:36 +01002721 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002722 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002723
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002724 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002725 BUG_ON(!domain);
2726
Weidong Han8c11e792008-12-08 15:29:22 +08002727 iommu = domain_get_iommu(domain);
2728
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002729 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002730 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2731 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002732 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002733
David Woodhoused794dc92009-06-28 00:27:49 +01002734 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2735 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002736
David Woodhoused794dc92009-06-28 00:27:49 +01002737 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2738 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002739
2740 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002741 dma_pte_clear_range(domain, start_pfn, last_pfn);
2742
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002743 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002744 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2745
mark gross5e0d2a62008-03-04 15:22:08 -08002746 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002747 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002748 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002749 /* free iova */
2750 __free_iova(&domain->iovad, iova);
2751 } else {
2752 add_unmap(domain, iova);
2753 /*
 2754 			 * queue up the release of the unmap to save roughly 1/6th of
 2755 			 * the cpu time used up by the iotlb flush operation...
2756 */
mark gross5e0d2a62008-03-04 15:22:08 -08002757 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002758}
2759
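/*
 * dma_map_ops .alloc_coherent callback: allocate zeroed pages and map them
 * DMA_BIDIRECTIONAL.  GFP_DMA/GFP_DMA32 are only forced for devices that
 * bypass the IOMMU and advertise a narrow coherent_dma_mask.
 */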
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002760static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2761 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002762{
2763 void *vaddr;
2764 int order;
2765
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002766 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002767 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002768
2769 if (!iommu_no_mapping(hwdev))
2770 flags &= ~(GFP_DMA | GFP_DMA32);
2771 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2772 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2773 flags |= GFP_DMA;
2774 else
2775 flags |= GFP_DMA32;
2776 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002777
2778 vaddr = (void *)__get_free_pages(flags, order);
2779 if (!vaddr)
2780 return NULL;
2781 memset(vaddr, 0, size);
2782
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002783 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2784 DMA_BIDIRECTIONAL,
2785 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002786 if (*dma_handle)
2787 return vaddr;
2788 free_pages((unsigned long)vaddr, order);
2789 return NULL;
2790}
2791
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002792static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2793 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002794{
2795 int order;
2796
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002797 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002798 order = get_order(size);
2799
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002800 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002801 free_pages((unsigned long)vaddr, order);
2802}
2803
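/*
 * dma_map_ops .unmap_sg callback: the whole scatterlist was mapped into one
 * contiguous IOVA range, so tear it down just like intel_unmap_page(),
 * using the first sg entry's dma_address to locate the IOVA.
 */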
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002804static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2805 int nelems, enum dma_data_direction dir,
2806 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002807{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002808 struct pci_dev *pdev = to_pci_dev(hwdev);
2809 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002810 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002811 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002812 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002813
David Woodhouse73676832009-07-04 14:08:36 +01002814 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002815 return;
2816
2817 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002818 BUG_ON(!domain);
2819
2820 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002821
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002822 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002823 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2824 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002825 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002826
David Woodhoused794dc92009-06-28 00:27:49 +01002827 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2828 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002829
2830 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002831 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002832
David Woodhoused794dc92009-06-28 00:27:49 +01002833 /* free page tables */
2834 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2835
David Woodhouseacea0012009-07-14 01:55:11 +01002836 if (intel_iommu_strict) {
2837 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002838 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01002839 /* free iova */
2840 __free_iova(&domain->iovad, iova);
2841 } else {
2842 add_unmap(domain, iova);
2843 /*
 2844 			 * queue up the release of the unmap to save roughly 1/6th of
 2845 			 * the cpu time used up by the iotlb flush operation...
2846 */
2847 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002848}
2849
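/*
 * For devices that bypass the IOMMU, "mapping" a scatterlist is simply a
 * 1:1 translation of each sg page to its physical address.
 */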
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002850static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002851 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002852{
2853 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002854 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002855
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002856 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02002857 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00002858 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002859 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002860 }
2861 return nelems;
2862}
2863
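/*
 * dma_map_ops .map_sg callback: allocate a single IOVA range large enough
 * for the whole scatterlist, map it with domain_sg_mapping(), and flush the
 * IOTLB only when caching mode requires it (a write-buffer flush suffices
 * otherwise).
 */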
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002864static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2865 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002866{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002867 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002868 struct pci_dev *pdev = to_pci_dev(hwdev);
2869 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002870 size_t size = 0;
2871 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002872 struct iova *iova = NULL;
2873 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002874 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01002875 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08002876 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002877
2878 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01002879 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002880 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002881
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002882 domain = get_valid_domain_for_dev(pdev);
2883 if (!domain)
2884 return 0;
2885
Weidong Han8c11e792008-12-08 15:29:22 +08002886 iommu = domain_get_iommu(domain);
2887
David Woodhouseb536d242009-06-28 14:49:31 +01002888 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01002889 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002890
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002891 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2892 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002893 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002894 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002895 return 0;
2896 }
2897
2898 /*
2899 * Check if DMAR supports zero-length reads on write only
 2900 	 * mappings.
2901 */
 2902 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08002903 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002904 prot |= DMA_PTE_READ;
2905 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2906 prot |= DMA_PTE_WRITE;
2907
David Woodhouseb536d242009-06-28 14:49:31 +01002908 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01002909
Fenghua Yuf5329592009-08-04 15:09:37 -07002910 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01002911 if (unlikely(ret)) {
2912 /* clear the page */
2913 dma_pte_clear_range(domain, start_vpfn,
2914 start_vpfn + size - 1);
2915 /* free page tables */
2916 dma_pte_free_pagetable(domain, start_vpfn,
2917 start_vpfn + size - 1);
2918 /* free iova */
2919 __free_iova(&domain->iovad, iova);
2920 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002921 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002922
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002923 /* it's a non-present to present mapping. Only flush if caching mode */
2924 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002925 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002926 else
Weidong Han8c11e792008-12-08 15:29:22 +08002927 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002928
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002929 return nelems;
2930}
2931
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002932static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2933{
2934 return !dma_addr;
2935}
2936
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002937struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002938 .alloc_coherent = intel_alloc_coherent,
2939 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940 .map_sg = intel_map_sg,
2941 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002942 .map_page = intel_map_page,
2943 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09002944 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002945};
2946
2947static inline int iommu_domain_cache_init(void)
2948{
2949 int ret = 0;
2950
2951 iommu_domain_cache = kmem_cache_create("iommu_domain",
2952 sizeof(struct dmar_domain),
2953 0,
2954 SLAB_HWCACHE_ALIGN,
2956 NULL);
2957 if (!iommu_domain_cache) {
2958 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2959 ret = -ENOMEM;
2960 }
2961
2962 return ret;
2963}
2964
2965static inline int iommu_devinfo_cache_init(void)
2966{
2967 int ret = 0;
2968
2969 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2970 sizeof(struct device_domain_info),
2971 0,
2972 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002973 NULL);
2974 if (!iommu_devinfo_cache) {
2975 printk(KERN_ERR "Couldn't create devinfo cache\n");
2976 ret = -ENOMEM;
2977 }
2978
2979 return ret;
2980}
2981
2982static inline int iommu_iova_cache_init(void)
2983{
2984 int ret = 0;
2985
2986 iommu_iova_cache = kmem_cache_create("iommu_iova",
2987 sizeof(struct iova),
2988 0,
2989 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002990 NULL);
2991 if (!iommu_iova_cache) {
2992 printk(KERN_ERR "Couldn't create iova cache\n");
2993 ret = -ENOMEM;
2994 }
2995
2996 return ret;
2997}
2998
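/*
 * Create the three slab caches (iova, domain, device_domain_info) used by
 * this driver, tearing down whatever was already created on failure.
 */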
2999static int __init iommu_init_mempool(void)
3000{
3001 int ret;
3002 ret = iommu_iova_cache_init();
3003 if (ret)
3004 return ret;
3005
3006 ret = iommu_domain_cache_init();
3007 if (ret)
3008 goto domain_error;
3009
3010 ret = iommu_devinfo_cache_init();
3011 if (!ret)
3012 return ret;
3013
3014 kmem_cache_destroy(iommu_domain_cache);
3015domain_error:
3016 kmem_cache_destroy(iommu_iova_cache);
3017
3018 return -ENOMEM;
3019}
3020
3021static void __init iommu_exit_mempool(void)
3022{
3023 kmem_cache_destroy(iommu_devinfo_cache);
3024 kmem_cache_destroy(iommu_domain_cache);
3025 kmem_cache_destroy(iommu_iova_cache);
3026
3027}
3028
Dan Williams556ab452010-07-23 15:47:56 -07003029static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3030{
3031 struct dmar_drhd_unit *drhd;
3032 u32 vtbar;
3033 int rc;
3034
3035 /* We know that this device on this chipset has its own IOMMU.
3036 * If we find it under a different IOMMU, then the BIOS is lying
3037 * to us. Hope that the IOMMU for this device is actually
3038 * disabled, and it needs no translation...
3039 */
3040 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3041 if (rc) {
3042 /* "can't" happen */
3043 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3044 return;
3045 }
3046 vtbar &= 0xffff0000;
3047
 3048 	/* we know that this iommu should be at offset 0xa000 from vtbar */
3049 drhd = dmar_find_matched_drhd_unit(pdev);
3050 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3051 TAINT_FIRMWARE_WORKAROUND,
3052 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3053 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3054}
3055DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3056
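/*
 * Mark DRHD units that cover no PCI devices as ignored.  When dmar_map_gfx
 * is disabled, also ignore units that only cover graphics devices and flag
 * those devices with DUMMY_DEVICE_DOMAIN_INFO so they are left
 * untranslated.
 */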
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003057static void __init init_no_remapping_devices(void)
3058{
3059 struct dmar_drhd_unit *drhd;
3060
3061 for_each_drhd_unit(drhd) {
3062 if (!drhd->include_all) {
3063 int i;
3064 for (i = 0; i < drhd->devices_cnt; i++)
3065 if (drhd->devices[i] != NULL)
3066 break;
3067 /* ignore DMAR unit if no pci devices exist */
3068 if (i == drhd->devices_cnt)
3069 drhd->ignored = 1;
3070 }
3071 }
3072
3073 if (dmar_map_gfx)
3074 return;
3075
3076 for_each_drhd_unit(drhd) {
3077 int i;
3078 if (drhd->ignored || drhd->include_all)
3079 continue;
3080
3081 for (i = 0; i < drhd->devices_cnt; i++)
3082 if (drhd->devices[i] &&
3083 !IS_GFX_DEVICE(drhd->devices[i]))
3084 break;
3085
3086 if (i < drhd->devices_cnt)
3087 continue;
3088
3089 /* bypass IOMMU if it is just for gfx devices */
3090 drhd->ignored = 1;
3091 for (i = 0; i < drhd->devices_cnt; i++) {
3092 if (!drhd->devices[i])
3093 continue;
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07003094 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003095 }
3096 }
3097}
3098
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003099#ifdef CONFIG_SUSPEND
3100static int init_iommu_hw(void)
3101{
3102 struct dmar_drhd_unit *drhd;
3103 struct intel_iommu *iommu = NULL;
3104
3105 for_each_active_iommu(iommu, drhd)
3106 if (iommu->qi)
3107 dmar_reenable_qi(iommu);
3108
3109 for_each_active_iommu(iommu, drhd) {
3110 iommu_flush_write_buffer(iommu);
3111
3112 iommu_set_root_entry(iommu);
3113
3114 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003115 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003116 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003117 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003118 iommu_enable_translation(iommu);
David Woodhouseb94996c2009-09-19 15:28:12 -07003119 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003120 }
3121
3122 return 0;
3123}
3124
3125static void iommu_flush_all(void)
3126{
3127 struct dmar_drhd_unit *drhd;
3128 struct intel_iommu *iommu;
3129
3130 for_each_active_iommu(iommu, drhd) {
3131 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003132 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003133 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003134 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003135 }
3136}
3137
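/*
 * Suspend: after a global context/IOTLB flush, disable translation and
 * save each IOMMU's fault-event registers so they can be restored on
 * resume.
 */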
3138static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3139{
3140 struct dmar_drhd_unit *drhd;
3141 struct intel_iommu *iommu = NULL;
3142 unsigned long flag;
3143
3144 for_each_active_iommu(iommu, drhd) {
3145 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3146 GFP_ATOMIC);
3147 if (!iommu->iommu_state)
3148 goto nomem;
3149 }
3150
3151 iommu_flush_all();
3152
3153 for_each_active_iommu(iommu, drhd) {
3154 iommu_disable_translation(iommu);
3155
3156 spin_lock_irqsave(&iommu->register_lock, flag);
3157
3158 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3159 readl(iommu->reg + DMAR_FECTL_REG);
3160 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3161 readl(iommu->reg + DMAR_FEDATA_REG);
3162 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3163 readl(iommu->reg + DMAR_FEADDR_REG);
3164 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3165 readl(iommu->reg + DMAR_FEUADDR_REG);
3166
3167 spin_unlock_irqrestore(&iommu->register_lock, flag);
3168 }
3169 return 0;
3170
3171nomem:
3172 for_each_active_iommu(iommu, drhd)
3173 kfree(iommu->iommu_state);
3174
3175 return -ENOMEM;
3176}
3177
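/*
 * Resume: re-enable DMA remapping via init_iommu_hw() (root entry, global
 * flushes, translation on), then restore the saved fault-event registers.
 */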
3178static int iommu_resume(struct sys_device *dev)
3179{
3180 struct dmar_drhd_unit *drhd;
3181 struct intel_iommu *iommu = NULL;
3182 unsigned long flag;
3183
3184 if (init_iommu_hw()) {
3185 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3186 return -EIO;
3187 }
3188
3189 for_each_active_iommu(iommu, drhd) {
3190
3191 spin_lock_irqsave(&iommu->register_lock, flag);
3192
3193 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3194 iommu->reg + DMAR_FECTL_REG);
3195 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3196 iommu->reg + DMAR_FEDATA_REG);
3197 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3198 iommu->reg + DMAR_FEADDR_REG);
3199 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3200 iommu->reg + DMAR_FEUADDR_REG);
3201
3202 spin_unlock_irqrestore(&iommu->register_lock, flag);
3203 }
3204
3205 for_each_active_iommu(iommu, drhd)
3206 kfree(iommu->iommu_state);
3207
3208 return 0;
3209}
3210
3211static struct sysdev_class iommu_sysclass = {
3212 .name = "iommu",
3213 .resume = iommu_resume,
3214 .suspend = iommu_suspend,
3215};
3216
3217static struct sys_device device_iommu = {
3218 .cls = &iommu_sysclass,
3219};
3220
3221static int __init init_iommu_sysfs(void)
3222{
3223 int error;
3224
3225 error = sysdev_class_register(&iommu_sysclass);
3226 if (error)
3227 return error;
3228
3229 error = sysdev_register(&device_iommu);
3230 if (error)
3231 sysdev_class_unregister(&iommu_sysclass);
3232
3233 return error;
3234}
3235
3236#else
3237static int __init init_iommu_sysfs(void)
3238{
3239 return 0;
3240}
 3241#endif /* CONFIG_SUSPEND */
3242
Fenghua Yu99dcade2009-11-11 07:23:06 -08003243/*
 3244 * Here we only respond to a device being unbound from its driver.
 3245 *
 3246 * A newly added device is not attached to its DMAR domain here; that
 3247 * happens when the device is first mapped to an iova.
3248 */
3249static int device_notifier(struct notifier_block *nb,
3250 unsigned long action, void *data)
3251{
3252 struct device *dev = data;
3253 struct pci_dev *pdev = to_pci_dev(dev);
3254 struct dmar_domain *domain;
3255
David Woodhouse44cd6132009-12-02 10:18:30 +00003256 if (iommu_no_mapping(dev))
3257 return 0;
3258
Fenghua Yu99dcade2009-11-11 07:23:06 -08003259 domain = find_domain(pdev);
3260 if (!domain)
3261 return 0;
3262
3263 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
3264 domain_remove_one_dev_info(domain, pdev);
3265
3266 return 0;
3267}
3268
3269static struct notifier_block device_nb = {
3270 .notifier_call = device_notifier,
3271};
3272
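/*
 * Driver entry point: parse the DMAR table and device scope, bail out if
 * DMA remapping is disabled, initialize the DMARs, install intel_dma_ops
 * as the DMA API backend, register the generic IOMMU ops and a bus
 * notifier for driver-unbind events.
 */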
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003273int __init intel_iommu_init(void)
3274{
3275 int ret = 0;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003276 int force_on = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003277
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003278 /* VT-d is required for a TXT/tboot launch, so enforce that */
3279 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003280
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003281 if (dmar_table_init()) {
3282 if (force_on)
3283 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003284 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003285 }
3286
3287 if (dmar_dev_scope_init()) {
3288 if (force_on)
3289 panic("tboot: Failed to initialize DMAR device scope\n");
3290 return -ENODEV;
3291 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003292
Suresh Siddha2ae21012008-07-10 11:16:43 -07003293 /*
3294 * Check the need for DMA-remapping initialization now.
 3295 	 * The above initialization will also be used by Interrupt-remapping.
3296 */
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003297 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003298 return -ENODEV;
3299
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003300 iommu_init_mempool();
3301 dmar_init_reserved_ranges();
3302
3303 init_no_remapping_devices();
3304
3305 ret = init_dmars();
3306 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003307 if (force_on)
3308 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003309 printk(KERN_ERR "IOMMU: dmar init failed\n");
3310 put_iova_domain(&reserved_iova_list);
3311 iommu_exit_mempool();
3312 return ret;
3313 }
3314 printk(KERN_INFO
3315 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3316
mark gross5e0d2a62008-03-04 15:22:08 -08003317 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003318#ifdef CONFIG_SWIOTLB
3319 swiotlb = 0;
3320#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003321 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003322
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003323 init_iommu_sysfs();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003324
3325 register_iommu(&intel_iommu_ops);
3326
Fenghua Yu99dcade2009-11-11 07:23:06 -08003327 bus_register_notifier(&pci_bus_type, &device_nb);
3328
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003329 return 0;
3330}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003331
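/*
 * If the device sits behind a PCIe-to-PCI(-X) bridge, context entries were
 * also set up for the bridges on the path; tear those down as well.
 */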
Han, Weidong3199aa62009-02-26 17:31:12 +08003332static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3333 struct pci_dev *pdev)
3334{
3335 struct pci_dev *tmp, *parent;
3336
3337 if (!iommu || !pdev)
3338 return;
3339
3340 /* dependent device detach */
3341 tmp = pci_find_upstream_pcie_bridge(pdev);
3342 /* Secondary interface's bus number and devfn 0 */
3343 if (tmp) {
3344 parent = pdev->bus->self;
3345 while (parent != tmp) {
3346 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003347 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003348 parent = parent->bus->self;
3349 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003350 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003351 iommu_detach_dev(iommu,
3352 tmp->subordinate->number, 0);
3353 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003354 iommu_detach_dev(iommu, tmp->bus->number,
3355 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003356 }
3357}
3358
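/*
 * Detach one device from its domain: remove its device_domain_info, clear
 * its context entry and dev-IOTLB state and, if it was the last device on
 * that IOMMU, drop the IOMMU from the domain's bitmap and recompute the
 * domain's capabilities.
 */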
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003359static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003360 struct pci_dev *pdev)
3361{
3362 struct device_domain_info *info;
3363 struct intel_iommu *iommu;
3364 unsigned long flags;
3365 int found = 0;
3366 struct list_head *entry, *tmp;
3367
David Woodhouse276dbf992009-04-04 01:45:37 +01003368 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3369 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003370 if (!iommu)
3371 return;
3372
3373 spin_lock_irqsave(&device_domain_lock, flags);
3374 list_for_each_safe(entry, tmp, &domain->devices) {
3375 info = list_entry(entry, struct device_domain_info, link);
David Woodhouse276dbf992009-04-04 01:45:37 +01003376 /* No need to compare PCI domain; it has to be the same */
Weidong Hanc7151a82008-12-08 22:51:37 +08003377 if (info->bus == pdev->bus->number &&
3378 info->devfn == pdev->devfn) {
3379 list_del(&info->link);
3380 list_del(&info->global);
3381 if (info->dev)
3382 info->dev->dev.archdata.iommu = NULL;
3383 spin_unlock_irqrestore(&device_domain_lock, flags);
3384
Yu Zhao93a23a72009-05-18 13:51:37 +08003385 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003386 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003387 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003388 free_devinfo_mem(info);
3389
3390 spin_lock_irqsave(&device_domain_lock, flags);
3391
3392 if (found)
3393 break;
3394 else
3395 continue;
3396 }
3397
 3398 		/* if there are no other devices under the same iommu
 3399 		 * owned by this domain, clear this iommu from iommu_bmp and
 3400 		 * update the iommu count and coherency
3401 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003402 if (iommu == device_to_iommu(info->segment, info->bus,
3403 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003404 found = 1;
3405 }
3406
3407 if (found == 0) {
3408 unsigned long tmp_flags;
3409 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3410 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3411 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003412 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003413 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3414 }
3415
3416 spin_unlock_irqrestore(&device_domain_lock, flags);
3417}
3418
3419static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3420{
3421 struct device_domain_info *info;
3422 struct intel_iommu *iommu;
3423 unsigned long flags1, flags2;
3424
3425 spin_lock_irqsave(&device_domain_lock, flags1);
3426 while (!list_empty(&domain->devices)) {
3427 info = list_entry(domain->devices.next,
3428 struct device_domain_info, link);
3429 list_del(&info->link);
3430 list_del(&info->global);
3431 if (info->dev)
3432 info->dev->dev.archdata.iommu = NULL;
3433
3434 spin_unlock_irqrestore(&device_domain_lock, flags1);
3435
Yu Zhao93a23a72009-05-18 13:51:37 +08003436 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003437 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003438 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003439 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003440
3441 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003442 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003443 */
3444 spin_lock_irqsave(&domain->iommu_lock, flags2);
3445 if (test_and_clear_bit(iommu->seq_id,
3446 &domain->iommu_bmp)) {
3447 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003448 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003449 }
3450 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3451
3452 free_devinfo_mem(info);
3453 spin_lock_irqsave(&device_domain_lock, flags1);
3454 }
3455 spin_unlock_irqrestore(&device_domain_lock, flags1);
3456}
3457
Weidong Han5e98c4b2008-12-08 23:03:27 +08003458/* domain id for virtual machine domains; it won't be set in context entries */
3459static unsigned long vm_domid;
3460
3461static struct dmar_domain *iommu_alloc_vm_domain(void)
3462{
3463 struct dmar_domain *domain;
3464
3465 domain = alloc_domain_mem();
3466 if (!domain)
3467 return NULL;
3468
3469 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003470 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003471 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3472 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3473
3474 return domain;
3475}
3476
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003477static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003478{
3479 int adjust_width;
3480
3481 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003482 spin_lock_init(&domain->iommu_lock);
3483
3484 domain_reserve_special_ranges(domain);
3485
3486 /* calculate AGAW */
3487 domain->gaw = guest_width;
3488 adjust_width = guestwidth_to_adjustwidth(guest_width);
3489 domain->agaw = width_to_agaw(adjust_width);
3490
3491 INIT_LIST_HEAD(&domain->devices);
3492
3493 domain->iommu_count = 0;
3494 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003495 domain->iommu_snooping = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003496 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003497 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003498
3499 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003500 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003501 if (!domain->pgd)
3502 return -ENOMEM;
3503 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3504 return 0;
3505}
3506
3507static void iommu_free_vm_domain(struct dmar_domain *domain)
3508{
3509 unsigned long flags;
3510 struct dmar_drhd_unit *drhd;
3511 struct intel_iommu *iommu;
3512 unsigned long i;
3513 unsigned long ndomains;
3514
3515 for_each_drhd_unit(drhd) {
3516 if (drhd->ignored)
3517 continue;
3518 iommu = drhd->iommu;
3519
3520 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003521 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003522 if (iommu->domains[i] == domain) {
3523 spin_lock_irqsave(&iommu->lock, flags);
3524 clear_bit(i, iommu->domain_ids);
3525 iommu->domains[i] = NULL;
3526 spin_unlock_irqrestore(&iommu->lock, flags);
3527 break;
3528 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003529 }
3530 }
3531}
3532
3533static void vm_domain_exit(struct dmar_domain *domain)
3534{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003535	/* Domain 0 is reserved, so don't process it */
3536 if (!domain)
3537 return;
3538
3539 vm_domain_remove_all_dev_info(domain);
3540 /* destroy iovas */
3541 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003542
3543 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003544 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003545
3546 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003547 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003548
3549 iommu_free_vm_domain(domain);
3550 free_domain_mem(domain);
3551}
3552
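/*
 * Hooks for the generic IOMMU API (e.g. KVM device assignment).  Domains
 * created through this interface are "virtual machine" domains with a
 * DEFAULT_DOMAIN_ADDRESS_WIDTH guest address width.
 */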
Joerg Roedel5d450802008-12-03 14:52:32 +01003553static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003554{
Joerg Roedel5d450802008-12-03 14:52:32 +01003555 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003556
Joerg Roedel5d450802008-12-03 14:52:32 +01003557 dmar_domain = iommu_alloc_vm_domain();
3558 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003559 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003560 "intel_iommu_domain_init: dmar_domain == NULL\n");
3561 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003562 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003563 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003564 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003565 "intel_iommu_domain_init() failed\n");
3566 vm_domain_exit(dmar_domain);
3567 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003568 }
Joerg Roedel5d450802008-12-03 14:52:32 +01003569 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003570
Joerg Roedel5d450802008-12-03 14:52:32 +01003571 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003572}
Kay, Allen M38717942008-09-09 18:37:29 +03003573
Joerg Roedel5d450802008-12-03 14:52:32 +01003574static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003575{
Joerg Roedel5d450802008-12-03 14:52:32 +01003576 struct dmar_domain *dmar_domain = domain->priv;
3577
3578 domain->priv = NULL;
3579 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003580}
Kay, Allen M38717942008-09-09 18:37:29 +03003581
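/*
 * Attach a PCI device to an API-created domain: detach it from any previous
 * domain, check that this IOMMU's address width covers the domain's mapped
 * range (dropping extra page-table levels if needed), then install the
 * context entry.
 */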
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003582static int intel_iommu_attach_device(struct iommu_domain *domain,
3583 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003584{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003585 struct dmar_domain *dmar_domain = domain->priv;
3586 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003587 struct intel_iommu *iommu;
3588 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003589
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003590 /* normally pdev is not mapped */
3591 if (unlikely(domain_context_mapped(pdev))) {
3592 struct dmar_domain *old_domain;
3593
3594 old_domain = find_domain(pdev);
3595 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003596 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3597 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3598 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003599 else
3600 domain_remove_dev_info(old_domain);
3601 }
3602 }
3603
David Woodhouse276dbf992009-04-04 01:45:37 +01003604 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3605 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003606 if (!iommu)
3607 return -ENODEV;
3608
3609 /* check if this iommu agaw is sufficient for max mapped address */
3610 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003611 if (addr_width > cap_mgaw(iommu->cap))
3612 addr_width = cap_mgaw(iommu->cap);
3613
3614 if (dmar_domain->max_addr > (1LL << addr_width)) {
3615 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003616 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003617 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003618 return -EFAULT;
3619 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003620 dmar_domain->gaw = addr_width;
3621
3622 /*
3623 * Knock out extra levels of page tables if necessary
3624 */
3625 while (iommu->agaw < dmar_domain->agaw) {
3626 struct dma_pte *pte;
3627
3628 pte = dmar_domain->pgd;
3629 if (dma_pte_present(pte)) {
3630 free_pgtable_page(dmar_domain->pgd);
Sheng Yang25cbff12010-06-12 19:21:42 +08003631 dmar_domain->pgd = (struct dma_pte *)
3632 phys_to_virt(dma_pte_addr(pte));
Tom Lyona99c47a2010-05-17 08:20:45 +01003633 }
3634 dmar_domain->agaw--;
3635 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003636
David Woodhouse5fe60f42009-08-09 10:53:41 +01003637 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003638}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003639
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003640static void intel_iommu_detach_device(struct iommu_domain *domain,
3641 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003642{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003643 struct dmar_domain *dmar_domain = domain->priv;
3644 struct pci_dev *pdev = to_pci_dev(dev);
3645
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003646 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003647}
Kay, Allen M38717942008-09-09 18:37:29 +03003648
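/*
 * Map or unmap (1 << gfp_order) pages in an API-created domain.  The map
 * path checks the request against the domain's guest address width and
 * grows dmar_domain->max_addr; the unmap path clears the PTEs and trims
 * max_addr when the topmost mapping is removed.
 */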
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003649static int intel_iommu_map(struct iommu_domain *domain,
3650 unsigned long iova, phys_addr_t hpa,
3651 int gfp_order, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003652{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003653 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003654 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003655 int prot = 0;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003656 size_t size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003657 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003658
Joerg Roedeldde57a22008-12-03 15:04:09 +01003659 if (iommu_prot & IOMMU_READ)
3660 prot |= DMA_PTE_READ;
3661 if (iommu_prot & IOMMU_WRITE)
3662 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08003663 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3664 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003665
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003666 size = PAGE_SIZE << gfp_order;
David Woodhouse163cc522009-06-28 00:51:17 +01003667 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003668 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003669 u64 end;
3670
3671 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01003672 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003673 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01003674 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003675 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01003676 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003677 return -EFAULT;
3678 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01003679 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003680 }
David Woodhousead051222009-06-28 14:22:28 +01003681 /* Round up size to next multiple of PAGE_SIZE, if it and
3682 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01003683 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01003684 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3685 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003686 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03003687}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003688
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003689static int intel_iommu_unmap(struct iommu_domain *domain,
3690 unsigned long iova, int gfp_order)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003691{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003692 struct dmar_domain *dmar_domain = domain->priv;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003693 size_t size = PAGE_SIZE << gfp_order;
Sheng Yang4b99d352009-07-08 11:52:52 +01003694
David Woodhouse163cc522009-06-28 00:51:17 +01003695 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3696 (iova + size - 1) >> VTD_PAGE_SHIFT);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003697
David Woodhouse163cc522009-06-28 00:51:17 +01003698 if (dmar_domain->max_addr == iova + size)
3699 dmar_domain->max_addr = iova;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003700
3701 return gfp_order;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003702}
Kay, Allen M38717942008-09-09 18:37:29 +03003703
Joerg Roedeld14d6572008-12-03 15:06:57 +01003704static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3705 unsigned long iova)
Kay, Allen M38717942008-09-09 18:37:29 +03003706{
Joerg Roedeld14d6572008-12-03 15:06:57 +01003707 struct dmar_domain *dmar_domain = domain->priv;
Kay, Allen M38717942008-09-09 18:37:29 +03003708 struct dma_pte *pte;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003709 u64 phys = 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003710
David Woodhouseb026fd22009-06-28 10:37:25 +01003711 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
Kay, Allen M38717942008-09-09 18:37:29 +03003712 if (pte)
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003713 phys = dma_pte_addr(pte);
Kay, Allen M38717942008-09-09 18:37:29 +03003714
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003715 return phys;
Kay, Allen M38717942008-09-09 18:37:29 +03003716}
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003717
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003718static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3719 unsigned long cap)
3720{
3721 struct dmar_domain *dmar_domain = domain->priv;
3722
3723 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3724 return dmar_domain->iommu_snooping;
Tom Lyon323f99c2010-07-02 16:56:14 -04003725 if (cap == IOMMU_CAP_INTR_REMAP)
3726 return intr_remapping_enabled;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003727
3728 return 0;
3729}
3730
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003731static struct iommu_ops intel_iommu_ops = {
3732 .domain_init = intel_iommu_domain_init,
3733 .domain_destroy = intel_iommu_domain_destroy,
3734 .attach_dev = intel_iommu_attach_device,
3735 .detach_dev = intel_iommu_detach_device,
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003736 .map = intel_iommu_map,
3737 .unmap = intel_iommu_unmap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003738 .iova_to_phys = intel_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003739 .domain_has_cap = intel_iommu_domain_has_cap,
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003740};
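
/*
 * Rough sketch (illustrative only, not part of this driver) of how a
 * consumer drives these ops through the generic IOMMU API in
 * <linux/iommu.h>; 'pdev', 'iova' and 'phys' are placeholder values:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (!dom || iommu_attach_device(dom, &pdev->dev))
 *		goto err;
 *	iommu_map(dom, iova, phys, 0, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, 0);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */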
David Woodhouse9af88142009-02-13 23:18:03 +00003741
3742static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3743{
3744 /*
3745 * Mobile 4 Series Chipset neglects to set RWBF capability,
3746 * but needs it:
3747 */
3748 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3749 rwbf_quirk = 1;
David Woodhouse2d9e6672010-06-15 10:57:57 +01003750
3751 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
3752 if (dev->revision == 0x07) {
3753 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
3754 dmar_map_gfx = 0;
3755 }
David Woodhouse9af88142009-02-13 23:18:03 +00003756}
3757
3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
David Woodhousee0fc7e02009-09-30 09:12:17 -07003759
Adam Jacksoneecfd572010-08-25 21:17:34 +01003760#define GGC 0x52
3761#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3762#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3763#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3764#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3765#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3766#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3767#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3768#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3769
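/*
 * Calpella-era integrated graphics: if the BIOS allocated no shadow GTT
 * (none of the GGC "VT enabled" size encodings), the IGD cannot be
 * remapped, so leave graphics untranslated.
 */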
David Woodhouse9eecabc2010-09-21 22:28:23 +01003770static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3771{
3772 unsigned short ggc;
3773
Adam Jacksoneecfd572010-08-25 21:17:34 +01003774 if (pci_read_config_word(dev, GGC, &ggc))
David Woodhouse9eecabc2010-09-21 22:28:23 +01003775 return;
3776
Adam Jacksoneecfd572010-08-25 21:17:34 +01003777 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
David Woodhouse9eecabc2010-09-21 22:28:23 +01003778 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3779 dmar_map_gfx = 0;
3780 }
3781}
3782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3786
David Woodhousee0fc7e02009-09-30 09:12:17 -07003787/* On Tylersburg chipsets, some BIOSes have been known to enable the
3788 ISOCH DMAR unit for the Azalia sound device, but not give it any
3789 TLB entries, which causes it to deadlock. Check for that. We do
3790 this in a function called from init_dmars(), instead of in a PCI
3791 quirk, because we don't want to print the obnoxious "BIOS broken"
3792 message if VT-d is actually disabled.
3793*/
3794static void __init check_tylersburg_isoch(void)
3795{
3796 struct pci_dev *pdev;
3797 uint32_t vtisochctrl;
3798
3799 /* If there's no Azalia in the system anyway, forget it. */
3800 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3801 if (!pdev)
3802 return;
3803 pci_dev_put(pdev);
3804
3805 /* System Management Registers. Might be hidden, in which case
3806 we can't do the sanity check. But that's OK, because the
3807 known-broken BIOSes _don't_ actually hide it, so far. */
3808 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
3809 if (!pdev)
3810 return;
3811
3812 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
3813 pci_dev_put(pdev);
3814 return;
3815 }
3816
3817 pci_dev_put(pdev);
3818
3819 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
3820 if (vtisochctrl & 1)
3821 return;
3822
3823 /* Drop all bits other than the number of TLB entries */
3824 vtisochctrl &= 0x1c;
3825
3826 /* If we have the recommended number of TLB entries (16), fine. */
3827 if (vtisochctrl == 0x10)
3828 return;
3829
3830 /* Zero TLB entries? You get to ride the short bus to school. */
3831 if (!vtisochctrl) {
3832 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
3833 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3834 dmi_get_system_info(DMI_BIOS_VENDOR),
3835 dmi_get_system_info(DMI_BIOS_VERSION),
3836 dmi_get_system_info(DMI_PRODUCT_VERSION));
3837 iommu_identity_mapping |= IDENTMAP_AZALIA;
3838 return;
3839 }
3840
3841 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
3842 vtisochctrl);
3843}