/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

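/*
 * Worked example (illustrative note, not from the original source): with
 * the default 48-bit guest address width and VTD_PAGE_SHIFT == 12,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, the highest 4KiB page frame
 * number addressable with 48 bits. On 32-bit kernels DOMAIN_MAX_PFN()
 * clamps that to ULONG_MAX so PFNs always fit in an unsigned long.
 */
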
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

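/*
 * Quick reference for the helpers above (illustrative note, assuming the
 * 9-bit-per-level layout set by LEVEL_STRIDE):
 *
 *	agaw 0 -> 2-level table, 30-bit width
 *	agaw 1 -> 3-level table, 39-bit width
 *	agaw 2 -> 4-level table, 48-bit width
 *
 * e.g. pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff, the 9-bit index
 * into a level-2 table, and lvl_to_nr_pages(2) == 512 4KiB pages (2MiB).
 */
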
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

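/*
 * Illustrative note: on x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * two conversions above shift by 0 and MM and DMA PFNs coincide. On an
 * architecture with 16KiB MM pages (PAGE_SHIFT == 14) one MM page would
 * cover four VT-d pages and the shift would be 2.
 */
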
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d can't be enabled successfully
 * (used when the kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

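/*
 * Illustrative sketch only (this helper is hypothetical and never called
 * by the driver): how the setters above compose a context entry for a
 * device, assuming translation type 0 (multi-level page table per the
 * VT-d spec) and a page-table root already converted to a physical
 * address.
 */
static inline void example_fill_context(struct context_entry *ce,
					unsigned long pgd_phys,
					unsigned long agaw, unsigned long did)
{
	context_clear_entry(ce);
	context_set_domain_id(ce, did);		/* bits 8-23 of hi */
	context_set_address_width(ce, agaw);	/* bits 0-2 of hi */
	context_set_address_root(ce, pgd_phys);	/* bits 12-63 of lo */
	context_set_translation_type(ce, 0);	/* multi-level table */
	context_set_present(ce);		/* bit 0 of lo, set last */
}
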
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

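/*
 * Illustrative note: a present, readable and writable 4KiB leaf mapping
 * to page frame 'pfn' would be composed with the helpers above as
 *
 *	dma_clear_pte(pte);
 *	dma_set_pte_pfn(pte, pfn);
 *	dma_set_pte_readable(pte);
 *	dma_set_pte_writable(pte);
 *
 * after which dma_pte_present(pte) is true and dma_pte_addr(pte) returns
 * pfn << VTD_PAGE_SHIFT.
 */
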
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 * 	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/

	struct list_head devices; 	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

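/*
 * Example (illustrative): booting with
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables the IOMMU, disables batched IOTLB flushing and disables
 * super-page support; options are comma-separated and parsed in order by
 * the loop above.
 */
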
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

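/*
 * Worked example (illustrative): if an IOMMU reports SAGAW bits {1, 2}
 * (39- and 48-bit page-table widths), iommu_calculate_agaw() starts at
 * width_to_agaw(48) == 2, finds bit 2 set and returns agaw 2 (4-level).
 * An IOMMU supporting only bit 1 would fall back to agaw 1 (3-level,
 * 39-bit).
 */
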
/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

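/*
 * Walk sketch (illustrative): for a 4-level (agaw 2) domain and
 * target_level 1, pfn_to_dma_pte() descends level 4 -> 3 -> 2, allocating
 * any missing intermediate tables, and returns the level-1 (4KiB leaf)
 * PTE slot for 'pfn'. With target_level 0 it stops at whichever level
 * already holds a superpage or non-present entry, otherwise it descends
 * all the way to the leaf.
 */
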
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; a TLB flush should follow */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

860static void dma_pte_free_pagetable(struct dmar_domain *domain,
David Woodhoused794dc92009-06-28 00:27:49 +0100861 unsigned long start_pfn,
862 unsigned long last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700863{
David Woodhouse6660c632009-06-27 22:41:00 +0100864 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhousef3a0a522009-06-30 03:40:07 +0100865 struct dma_pte *first_pte, *pte;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700866 int total = agaw_to_level(domain->agaw);
867 int level;
David Woodhouse6660c632009-06-27 22:41:00 +0100868 unsigned long tmp;
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100869 int large_page = 2;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700870
David Woodhouse6660c632009-06-27 22:41:00 +0100871 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
872 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
David Woodhouse59c36282009-09-19 07:36:28 -0700873 BUG_ON(start_pfn > last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700874
David Woodhousef3a0a522009-06-30 03:40:07 +0100875 /* We don't need lock here; nobody else touches the iova range */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700876 level = 2;
877 while (level <= total) {
David Woodhouse6660c632009-06-27 22:41:00 +0100878 tmp = align_to_level(start_pfn, level);
879
David Woodhousef3a0a522009-06-30 03:40:07 +0100880 /* If we can't even clear one PTE at this level, we're done */
David Woodhouse6660c632009-06-27 22:41:00 +0100881 if (tmp + level_size(level) - 1 > last_pfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700882 return;
883
David Woodhouse59c36282009-09-19 07:36:28 -0700884 do {
Youquan Song6dd9a7c2011-05-25 19:13:49 +0100885 large_page = level;
886 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
887 if (large_page > level)
888 level = large_page + 1;
David Woodhousef3a0a522009-06-30 03:40:07 +0100889 if (!pte) {
890 tmp = align_to_level(tmp + 1, level + 1);
891 continue;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700892 }
David Woodhouse75e6bf92009-07-02 11:21:16 +0100893 do {
David Woodhouse6a43e572009-07-02 12:02:34 +0100894 if (dma_pte_present(pte)) {
895 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
896 dma_clear_pte(pte);
897 }
David Woodhousef3a0a522009-06-30 03:40:07 +0100898 pte++;
899 tmp += level_size(level);
David Woodhouse75e6bf92009-07-02 11:21:16 +0100900 } while (!first_pte_in_page(pte) &&
901 tmp + level_size(level) - 1 <= last_pfn);
902
David Woodhousef3a0a522009-06-30 03:40:07 +0100903 domain_flush_cache(domain, first_pte,
904 (void *)pte - (void *)first_pte);
905
David Woodhouse59c36282009-09-19 07:36:28 -0700906 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700907 level++;
908 }
909 /* free pgd */
David Woodhoused794dc92009-06-28 00:27:49 +0100910 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -0700911 free_pgtable_page(domain->pgd);
912 domain->pgd = NULL;
913 }
914}
915
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably to be super secure.. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if no PSI support or the size
	 * is too big.
	 * PSI requires page size to be 2 ^ x, and the base address is
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
					 DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
					 DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

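/*
 * Worked example (illustrative): a request to flush 3 pages rounds up to
 * 4, so mask == ilog2(4) == 2 and a naturally aligned 4-page region is
 * invalidated, since PSI only accepts power-of-two, aligned ranges. If
 * mask exceeded cap_max_amask_val(), the function above would fall back
 * to a domain-selective flush instead.
 */
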
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
		 ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

1372static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001373static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001374
Joseph Cihula51a63e62011-03-21 11:04:24 -07001375static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001376{
1377 struct pci_dev *pdev = NULL;
1378 struct iova *iova;
1379 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001380
David Millerf6611972008-02-06 01:36:23 -08001381 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001382
Mark Gross8a443df2008-03-04 14:59:31 -08001383 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1384 &reserved_rbtree_key);
1385
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001386 /* IOAPIC ranges shouldn't be accessed by DMA */
1387 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1388 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001389 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001390 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001391 return -ENODEV;
1392 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001393
1394 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1395 for_each_pci_dev(pdev) {
1396 struct resource *r;
1397
1398 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1399 r = &pdev->resource[i];
1400 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1401 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001402 iova = reserve_iova(&reserved_iova_list,
1403 IOVA_PFN(r->start),
1404 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001405 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001406 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001407 return -ENODEV;
1408 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001409 }
1410 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001411 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001412}
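/*
 * For example (addresses from the constants above), the IOAPIC window
 * 0xfee00000-0xfeefffff is reserved as IOVA pfns 0xfee00-0xfeeff, so
 * those DMA addresses are never handed out; the loop then does the
 * same for every MMIO BAR of every PCI device to rule out
 * peer-to-peer hits.
 */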
1413
1414static void domain_reserve_special_ranges(struct dmar_domain *domain)
1415{
1416 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1417}
1418
1419static inline int guestwidth_to_adjustwidth(int gaw)
1420{
1421 int agaw;
1422 int r = (gaw - 12) % 9;
1423
1424 if (r == 0)
1425 agaw = gaw;
1426 else
1427 agaw = gaw + 9 - r;
1428 if (agaw > 64)
1429 agaw = 64;
1430 return agaw;
1431}
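/*
 * Worked examples (illustrative): each page-table level translates 9
 * bits above the 12-bit page offset, so the width is rounded up to
 * the next 12 + 9*n and capped at 64:
 *
 *   guestwidth_to_adjustwidth(39) == 39   (39 == 12 + 9*3)
 *   guestwidth_to_adjustwidth(40) == 48   (rounded up to 12 + 9*4)
 *   guestwidth_to_adjustwidth(62) == 64   (66 would exceed the cap)
 */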
1432
1433static int domain_init(struct dmar_domain *domain, int guest_width)
1434{
1435 struct intel_iommu *iommu;
1436 int adjust_width, agaw;
1437 unsigned long sagaw;
1438
David Millerf6611972008-02-06 01:36:23 -08001439 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Hanc7151a82008-12-08 22:51:37 +08001440 spin_lock_init(&domain->iommu_lock);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001441
1442 domain_reserve_special_ranges(domain);
1443
1444 /* calculate AGAW */
Weidong Han8c11e792008-12-08 15:29:22 +08001445 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001446 if (guest_width > cap_mgaw(iommu->cap))
1447 guest_width = cap_mgaw(iommu->cap);
1448 domain->gaw = guest_width;
1449 adjust_width = guestwidth_to_adjustwidth(guest_width);
1450 agaw = width_to_agaw(adjust_width);
1451 sagaw = cap_sagaw(iommu->cap);
1452 if (!test_bit(agaw, &sagaw)) {
1453 /* hardware doesn't support it, choose a bigger one */
1454 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1455 agaw = find_next_bit(&sagaw, 5, agaw);
1456 if (agaw >= 5)
1457 return -ENODEV;
1458 }
1459 domain->agaw = agaw;
1460 INIT_LIST_HEAD(&domain->devices);
1461
Weidong Han8e6040972008-12-08 15:49:06 +08001462 if (ecap_coherent(iommu->ecap))
1463 domain->iommu_coherency = 1;
1464 else
1465 domain->iommu_coherency = 0;
1466
Sheng Yang58c610b2009-03-18 15:33:05 +08001467 if (ecap_sc_support(iommu->ecap))
1468 domain->iommu_snooping = 1;
1469 else
1470 domain->iommu_snooping = 0;
1471
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001472 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
Weidong Hanc7151a82008-12-08 22:51:37 +08001473 domain->iommu_count = 1;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001474 domain->nid = iommu->node;
Weidong Hanc7151a82008-12-08 22:51:37 +08001475
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001476 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07001477 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001478 if (!domain->pgd)
1479 return -ENOMEM;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001480 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001481 return 0;
1482}
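/*
 * Sketch of the AGAW selection in domain_init() above (illustrative):
 * for guest_width == 48 the adjusted width is 48 and width_to_agaw()
 * yields the 4-level encoding. If cap_sagaw() doesn't advertise that
 * exact encoding, find_next_bit() picks the next wider one the
 * hardware does support; only when no wider encoding exists does the
 * function fail with -ENODEV.
 */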
1483
1484static void domain_exit(struct dmar_domain *domain)
1485{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001486 struct dmar_drhd_unit *drhd;
1487 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001488
1489	/* Domain 0 is reserved, so don't process it */
1490 if (!domain)
1491 return;
1492
Alex Williamson7b668352011-05-24 12:02:41 +01001493 /* Flush any lazy unmaps that may reference this domain */
1494 if (!intel_iommu_strict)
1495 flush_unmaps_timeout(0);
1496
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001497 domain_remove_dev_info(domain);
1498 /* destroy iovas */
1499 put_iova_domain(&domain->iovad);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001500
1501 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01001502 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001503
1504 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01001505 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001506
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001507 for_each_active_iommu(iommu, drhd)
1508 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1509 iommu_detach_domain(domain, iommu);
1510
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001511 free_domain_mem(domain);
1512}
1513
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001514static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1515 u8 bus, u8 devfn, int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001516{
1517 struct context_entry *context;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001518 unsigned long flags;
Weidong Han5331fe62008-12-08 23:00:00 +08001519 struct intel_iommu *iommu;
Weidong Hanea6606b2008-12-08 23:08:15 +08001520 struct dma_pte *pgd;
1521 unsigned long num;
1522 unsigned long ndomains;
1523 int id;
1524 int agaw;
Yu Zhao93a23a72009-05-18 13:51:37 +08001525 struct device_domain_info *info = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001526
1527 pr_debug("Set context mapping for %02x:%02x.%d\n",
1528 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001529
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001530 BUG_ON(!domain->pgd);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001531 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1532 translation != CONTEXT_TT_MULTI_LEVEL);
Weidong Han5331fe62008-12-08 23:00:00 +08001533
David Woodhouse276dbf992009-04-04 01:45:37 +01001534 iommu = device_to_iommu(segment, bus, devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001535 if (!iommu)
1536 return -ENODEV;
1537
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001538 context = device_to_context_entry(iommu, bus, devfn);
1539 if (!context)
1540 return -ENOMEM;
1541 spin_lock_irqsave(&iommu->lock, flags);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001542 if (context_present(context)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001543 spin_unlock_irqrestore(&iommu->lock, flags);
1544 return 0;
1545 }
1546
Weidong Hanea6606b2008-12-08 23:08:15 +08001547 id = domain->id;
1548 pgd = domain->pgd;
1549
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001550 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1551 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001552 int found = 0;
1553
1554 /* find an available domain id for this device in iommu */
1555 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001556 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Weidong Hanea6606b2008-12-08 23:08:15 +08001557 if (iommu->domains[num] == domain) {
1558 id = num;
1559 found = 1;
1560 break;
1561 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001562 }
1563
1564 if (found == 0) {
1565 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1566 if (num >= ndomains) {
1567 spin_unlock_irqrestore(&iommu->lock, flags);
1568 printk(KERN_ERR "IOMMU: no free domain ids\n");
1569 return -EFAULT;
1570 }
1571
1572 set_bit(num, iommu->domain_ids);
1573 iommu->domains[num] = domain;
1574 id = num;
1575 }
1576
1577		/* Skip the top levels of the page tables for an
1578		 * iommu whose agaw is smaller than the default.
Chris Wright1672af12009-12-02 12:06:34 -08001579 * Unnecessary for PT mode.
Weidong Hanea6606b2008-12-08 23:08:15 +08001580 */
Chris Wright1672af12009-12-02 12:06:34 -08001581 if (translation != CONTEXT_TT_PASS_THROUGH) {
1582 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1583 pgd = phys_to_virt(dma_pte_addr(pgd));
1584 if (!dma_pte_present(pgd)) {
1585 spin_unlock_irqrestore(&iommu->lock, flags);
1586 return -ENOMEM;
1587 }
Weidong Hanea6606b2008-12-08 23:08:15 +08001588 }
1589 }
1590 }
1591
1592 context_set_domain_id(context, id);
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001593
Yu Zhao93a23a72009-05-18 13:51:37 +08001594 if (translation != CONTEXT_TT_PASS_THROUGH) {
1595 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1596 translation = info ? CONTEXT_TT_DEV_IOTLB :
1597 CONTEXT_TT_MULTI_LEVEL;
1598 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001599 /*
1600	 * In pass-through mode, AW must be programmed to indicate the largest
1601	 * AGAW value supported by hardware, and ASR is ignored by hardware.
1602 */
Yu Zhao93a23a72009-05-18 13:51:37 +08001603 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001604 context_set_address_width(context, iommu->msagaw);
Yu Zhao93a23a72009-05-18 13:51:37 +08001605 else {
1606 context_set_address_root(context, virt_to_phys(pgd));
1607 context_set_address_width(context, iommu->agaw);
1608 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001609
1610 context_set_translation_type(context, translation);
Mark McLoughlinc07e7d22008-11-21 16:54:46 +00001611 context_set_fault_enable(context);
1612 context_set_present(context);
Weidong Han5331fe62008-12-08 23:00:00 +08001613 domain_flush_cache(domain, context, sizeof(*context));
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001614
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001615 /*
1616	 * It's a non-present to present mapping. If hardware doesn't cache
1617	 * non-present entries, we only need to flush the write-buffer. If it
1618	 * _does_ cache non-present entries, then it does so in the special
1619	 * domain #0, which we have to flush:
1620 */
1621 if (cap_caching_mode(iommu->cap)) {
1622 iommu->flush.flush_context(iommu, 0,
1623 (((u16)bus) << 8) | devfn,
1624 DMA_CCMD_MASK_NOBIT,
1625 DMA_CCMD_DEVICE_INVL);
Nadav Amit82653632010-04-01 13:24:40 +03001626 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001627 } else {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001628 iommu_flush_write_buffer(iommu);
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001629 }
Yu Zhao93a23a72009-05-18 13:51:37 +08001630 iommu_enable_dev_iotlb(info);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001631 spin_unlock_irqrestore(&iommu->lock, flags);
Weidong Hanc7151a82008-12-08 22:51:37 +08001632
1633 spin_lock_irqsave(&domain->iommu_lock, flags);
1634 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1635 domain->iommu_count++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07001636 if (domain->iommu_count == 1)
1637 domain->nid = iommu->node;
Sheng Yang58c610b2009-03-18 15:33:05 +08001638 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08001639 }
1640 spin_unlock_irqrestore(&domain->iommu_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001641 return 0;
1642}
1643
1644static int
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001645domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1646 int translation)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001647{
1648 int ret;
1649 struct pci_dev *tmp, *parent;
1650
David Woodhouse276dbf992009-04-04 01:45:37 +01001651 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001652 pdev->bus->number, pdev->devfn,
1653 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001654 if (ret)
1655 return ret;
1656
1657 /* dependent device mapping */
1658 tmp = pci_find_upstream_pcie_bridge(pdev);
1659 if (!tmp)
1660 return 0;
1661 /* Secondary interface's bus number and devfn 0 */
1662 parent = pdev->bus->self;
1663 while (parent != tmp) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001664 ret = domain_context_mapping_one(domain,
1665 pci_domain_nr(parent->bus),
1666 parent->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001667 parent->devfn, translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001668 if (ret)
1669 return ret;
1670 parent = parent->bus->self;
1671 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05001672 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001673 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001674 pci_domain_nr(tmp->subordinate),
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001675 tmp->subordinate->number, 0,
1676 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001677 else /* this is a legacy PCI bridge */
1678 return domain_context_mapping_one(domain,
David Woodhouse276dbf992009-04-04 01:45:37 +01001679 pci_domain_nr(tmp->bus),
1680 tmp->bus->number,
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07001681 tmp->devfn,
1682 translation);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001683}
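/*
 * Example of the walk above (topology hypothetical): for an endpoint
 * 0000:05:00.0 sitting behind a PCIe-to-PCI bridge, context entries
 * are set for the device itself, for every bridge between it and the
 * upstream PCIe bridge, and finally for (secondary bus, devfn 0) of
 * that bridge, since conventional PCI devices behind it all alias to
 * that single source-id.
 */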
1684
Weidong Han5331fe62008-12-08 23:00:00 +08001685static int domain_context_mapped(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001686{
1687 int ret;
1688 struct pci_dev *tmp, *parent;
Weidong Han5331fe62008-12-08 23:00:00 +08001689 struct intel_iommu *iommu;
1690
David Woodhouse276dbf992009-04-04 01:45:37 +01001691 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1692 pdev->devfn);
Weidong Han5331fe62008-12-08 23:00:00 +08001693 if (!iommu)
1694 return -ENODEV;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001695
David Woodhouse276dbf992009-04-04 01:45:37 +01001696 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001697 if (!ret)
1698 return ret;
1699 /* dependent device mapping */
1700 tmp = pci_find_upstream_pcie_bridge(pdev);
1701 if (!tmp)
1702 return ret;
1703 /* Secondary interface's bus number and devfn 0 */
1704 parent = pdev->bus->self;
1705 while (parent != tmp) {
Weidong Han8c11e792008-12-08 15:29:22 +08001706 ret = device_context_mapped(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01001707 parent->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001708 if (!ret)
1709 return ret;
1710 parent = parent->bus->self;
1711 }
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001712 if (pci_is_pcie(tmp))
David Woodhouse276dbf992009-04-04 01:45:37 +01001713 return device_context_mapped(iommu, tmp->subordinate->number,
1714 0);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001715 else
David Woodhouse276dbf992009-04-04 01:45:37 +01001716 return device_context_mapped(iommu, tmp->bus->number,
1717 tmp->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001718}
1719
Fenghua Yuf5329592009-08-04 15:09:37 -07001720/* Returns a number of VTD pages, but aligned to MM page size */
1721static inline unsigned long aligned_nrpages(unsigned long host_addr,
1722 size_t size)
1723{
1724 host_addr &= ~PAGE_MASK;
1725 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1726}
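/*
 * Worked example (illustrative, assuming 4KiB pages so PAGE_SHIFT ==
 * VTD_PAGE_SHIFT == 12): a 6000-byte buffer starting 0x234 bytes into
 * a page gives PAGE_ALIGN(0x234 + 6000) >> 12 == 2, i.e.
 * aligned_nrpages(0x1234, 6000) == 2 pages.
 */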
1727
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001728/* Return largest possible superpage level for a given mapping */
1729static inline int hardware_largepage_caps(struct dmar_domain *domain,
1730 unsigned long iov_pfn,
1731 unsigned long phy_pfn,
1732 unsigned long pages)
1733{
1734 int support, level = 1;
1735 unsigned long pfnmerge;
1736
1737 support = domain->iommu_superpage;
1738
1739 /* To use a large page, the virtual *and* physical addresses
1740 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1741 of them will mean we have to use smaller pages. So just
1742 merge them and check both at once. */
1743 pfnmerge = iov_pfn | phy_pfn;
1744
1745 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1746 pages >>= VTD_STRIDE_SHIFT;
1747 if (!pages)
1748 break;
1749 pfnmerge >>= VTD_STRIDE_SHIFT;
1750 level++;
1751 support--;
1752 }
1753 return level;
1754}
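/*
 * Worked example (values hypothetical): with domain->iommu_superpage
 * == 1 (2MiB superpages supported), iov_pfn == 0x1200, phy_pfn ==
 * 0x3400 and pages == 0x300: pfnmerge == 0x3600 has its low 9 bits
 * clear and 0x300 >> 9 is non-zero, so level 2 (one 2MiB superpage)
 * is returned. Any low bit set in either pfn would keep level at 1.
 */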
1755
David Woodhouse9051aa02009-06-29 12:30:54 +01001756static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1757 struct scatterlist *sg, unsigned long phys_pfn,
1758 unsigned long nr_pages, int prot)
David Woodhousee1605492009-06-29 11:17:38 +01001759{
1760 struct dma_pte *first_pte = NULL, *pte = NULL;
David Woodhouse9051aa02009-06-29 12:30:54 +01001761 phys_addr_t uninitialized_var(pteval);
David Woodhousee1605492009-06-29 11:17:38 +01001762 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
David Woodhouse9051aa02009-06-29 12:30:54 +01001763 unsigned long sg_res;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001764 unsigned int largepage_lvl = 0;
1765 unsigned long lvl_pages = 0;
David Woodhousee1605492009-06-29 11:17:38 +01001766
1767 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1768
1769 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1770 return -EINVAL;
1771
1772 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1773
David Woodhouse9051aa02009-06-29 12:30:54 +01001774 if (sg)
1775 sg_res = 0;
1776 else {
1777 sg_res = nr_pages + 1;
1778 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1779 }
1780
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001781 while (nr_pages > 0) {
David Woodhousec85994e2009-07-01 19:21:24 +01001782 uint64_t tmp;
1783
David Woodhousee1605492009-06-29 11:17:38 +01001784 if (!sg_res) {
Fenghua Yuf5329592009-08-04 15:09:37 -07001785 sg_res = aligned_nrpages(sg->offset, sg->length);
David Woodhousee1605492009-06-29 11:17:38 +01001786 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1787 sg->dma_length = sg->length;
1788 pteval = page_to_phys(sg_page(sg)) | prot;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001789 phys_pfn = pteval >> VTD_PAGE_SHIFT;
David Woodhousee1605492009-06-29 11:17:38 +01001790 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001791
David Woodhousee1605492009-06-29 11:17:38 +01001792 if (!pte) {
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001793 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1794
1795 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
David Woodhousee1605492009-06-29 11:17:38 +01001796 if (!pte)
1797 return -ENOMEM;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001798			/* It is a large page */
1799 if (largepage_lvl > 1)
1800 pteval |= DMA_PTE_LARGE_PAGE;
1801 else
1802 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1803
David Woodhousee1605492009-06-29 11:17:38 +01001804 }
1805		/* We don't need a lock here; nobody else
1806		 * touches this iova range.
1807 */
David Woodhouse7766a3f2009-07-01 20:27:03 +01001808 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
David Woodhousec85994e2009-07-01 19:21:24 +01001809 if (tmp) {
David Woodhouse1bf20f02009-06-29 22:06:43 +01001810 static int dumps = 5;
David Woodhousec85994e2009-07-01 19:21:24 +01001811 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1812 iov_pfn, tmp, (unsigned long long)pteval);
David Woodhouse1bf20f02009-06-29 22:06:43 +01001813 if (dumps) {
1814 dumps--;
1815 debug_dma_dump_mappings(NULL);
1816 }
1817 WARN_ON(1);
1818 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001819
1820 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1821
1822 BUG_ON(nr_pages < lvl_pages);
1823 BUG_ON(sg_res < lvl_pages);
1824
1825 nr_pages -= lvl_pages;
1826 iov_pfn += lvl_pages;
1827 phys_pfn += lvl_pages;
1828 pteval += lvl_pages * VTD_PAGE_SIZE;
1829 sg_res -= lvl_pages;
1830
1831 /* If the next PTE would be the first in a new page, then we
1832 need to flush the cache on the entries we've just written.
1833 And then we'll need to recalculate 'pte', so clear it and
1834 let it get set again in the if (!pte) block above.
1835
1836 If we're done (!nr_pages) we need to flush the cache too.
1837
1838 Also if we've been setting superpages, we may need to
1839 recalculate 'pte' and switch back to smaller pages for the
1840 end of the mapping, if the trailing size is not enough to
1841 use another superpage (i.e. sg_res < lvl_pages). */
David Woodhousee1605492009-06-29 11:17:38 +01001842 pte++;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001843 if (!nr_pages || first_pte_in_page(pte) ||
1844 (largepage_lvl > 1 && sg_res < lvl_pages)) {
David Woodhousee1605492009-06-29 11:17:38 +01001845 domain_flush_cache(domain, first_pte,
1846 (void *)pte - (void *)first_pte);
1847 pte = NULL;
1848 }
Youquan Song6dd9a7c2011-05-25 19:13:49 +01001849
1850 if (!sg_res && nr_pages)
David Woodhousee1605492009-06-29 11:17:38 +01001851 sg = sg_next(sg);
1852 }
1853 return 0;
1854}
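/*
 * Illustrative walk-through of the loop above: mapping 0x300 pages at
 * a 2MiB-aligned iov_pfn/phys_pfn pair on 2MiB-capable hardware first
 * writes a single DMA_PTE_LARGE_PAGE entry covering 0x200 pages, then
 * notices the 0x100 remaining pages no longer fill a superpage
 * (sg_res < lvl_pages), flushes and recalculates 'pte', and finishes
 * with 0x100 ordinary 4KiB PTEs.
 */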
1855
David Woodhouse9051aa02009-06-29 12:30:54 +01001856static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1857 struct scatterlist *sg, unsigned long nr_pages,
1858 int prot)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001859{
David Woodhouse9051aa02009-06-29 12:30:54 +01001860 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1861}
Fenghua Yu5b6985c2008-10-16 18:02:32 -07001862
David Woodhouse9051aa02009-06-29 12:30:54 +01001863static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1864 unsigned long phys_pfn, unsigned long nr_pages,
1865 int prot)
1866{
1867 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001868}
1869
Weidong Hanc7151a82008-12-08 22:51:37 +08001870static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001871{
Weidong Hanc7151a82008-12-08 22:51:37 +08001872 if (!iommu)
1873 return;
Weidong Han8c11e792008-12-08 15:29:22 +08001874
1875 clear_context_table(iommu, bus, devfn);
1876 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse4c25a2c2009-05-10 17:16:06 +01001877 DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01001878 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001879}
1880
1881static void domain_remove_dev_info(struct dmar_domain *domain)
1882{
1883 struct device_domain_info *info;
1884 unsigned long flags;
Weidong Hanc7151a82008-12-08 22:51:37 +08001885 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001886
1887 spin_lock_irqsave(&device_domain_lock, flags);
1888 while (!list_empty(&domain->devices)) {
1889 info = list_entry(domain->devices.next,
1890 struct device_domain_info, link);
1891 list_del(&info->link);
1892 list_del(&info->global);
1893 if (info->dev)
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001894 info->dev->dev.archdata.iommu = NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001895 spin_unlock_irqrestore(&device_domain_lock, flags);
1896
Yu Zhao93a23a72009-05-18 13:51:37 +08001897 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01001898 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08001899 iommu_detach_dev(iommu, info->bus, info->devfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001900 free_devinfo_mem(info);
1901
1902 spin_lock_irqsave(&device_domain_lock, flags);
1903 }
1904 spin_unlock_irqrestore(&device_domain_lock, flags);
1905}
1906
1907/*
1908 * find_domain
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001909 * Note: struct pci_dev->dev.archdata.iommu stores the domain info
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001910 */
Kay, Allen M38717942008-09-09 18:37:29 +03001911static struct dmar_domain *
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001912find_domain(struct pci_dev *pdev)
1913{
1914 struct device_domain_info *info;
1915
1916 /* No lock here, assumes no domain exit in normal case */
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07001917 info = pdev->dev.archdata.iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001918 if (info)
1919 return info->domain;
1920 return NULL;
1921}
1922
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001923/* domain is initialized */
1924static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1925{
1926 struct dmar_domain *domain, *found = NULL;
1927 struct intel_iommu *iommu;
1928 struct dmar_drhd_unit *drhd;
1929 struct device_domain_info *info, *tmp;
1930 struct pci_dev *dev_tmp;
1931 unsigned long flags;
1932 int bus = 0, devfn = 0;
David Woodhouse276dbf992009-04-04 01:45:37 +01001933 int segment;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001934 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001935
1936 domain = find_domain(pdev);
1937 if (domain)
1938 return domain;
1939
David Woodhouse276dbf992009-04-04 01:45:37 +01001940 segment = pci_domain_nr(pdev->bus);
1941
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001942 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1943 if (dev_tmp) {
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09001944 if (pci_is_pcie(dev_tmp)) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001945 bus = dev_tmp->subordinate->number;
1946 devfn = 0;
1947 } else {
1948 bus = dev_tmp->bus->number;
1949 devfn = dev_tmp->devfn;
1950 }
1951 spin_lock_irqsave(&device_domain_lock, flags);
1952 list_for_each_entry(info, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01001953 if (info->segment == segment &&
1954 info->bus == bus && info->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001955 found = info->domain;
1956 break;
1957 }
1958 }
1959 spin_unlock_irqrestore(&device_domain_lock, flags);
1960		/* the pcie-to-pci bridge already has a domain, use it */
1961 if (found) {
1962 domain = found;
1963 goto found_domain;
1964 }
1965 }
1966
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001967 domain = alloc_domain();
1968 if (!domain)
1969 goto error;
1970
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001971 /* Allocate new domain for the device */
1972 drhd = dmar_find_matched_drhd_unit(pdev);
1973 if (!drhd) {
1974 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1975 pci_name(pdev));
1976 return NULL;
1977 }
1978 iommu = drhd->iommu;
1979
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001980 ret = iommu_attach_domain(domain, iommu);
1981 if (ret) {
Alex Williamson2fe9723d2011-03-04 14:52:30 -07001982 free_domain_mem(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001983 goto error;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001984 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001985
1986 if (domain_init(domain, gaw)) {
1987 domain_exit(domain);
1988 goto error;
1989 }
1990
1991 /* register pcie-to-pci device */
1992 if (dev_tmp) {
1993 info = alloc_devinfo_mem();
1994 if (!info) {
1995 domain_exit(domain);
1996 goto error;
1997 }
David Woodhouse276dbf992009-04-04 01:45:37 +01001998 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001999 info->bus = bus;
2000 info->devfn = devfn;
2001 info->dev = NULL;
2002 info->domain = domain;
2003		/* This domain is shared by devices under a p2p bridge */
Weidong Han3b5410e2008-12-08 09:17:15 +08002004 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002005
2006		/* the pcie-to-pci bridge already has a domain, use it */
2007 found = NULL;
2008 spin_lock_irqsave(&device_domain_lock, flags);
2009 list_for_each_entry(tmp, &device_domain_list, global) {
David Woodhouse276dbf992009-04-04 01:45:37 +01002010 if (tmp->segment == segment &&
2011 tmp->bus == bus && tmp->devfn == devfn) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002012 found = tmp->domain;
2013 break;
2014 }
2015 }
2016 if (found) {
Jiri Slaby00dfff72010-06-14 17:17:32 +02002017 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002018 free_devinfo_mem(info);
2019 domain_exit(domain);
2020 domain = found;
2021 } else {
2022 list_add(&info->link, &domain->devices);
2023 list_add(&info->global, &device_domain_list);
Jiri Slaby00dfff72010-06-14 17:17:32 +02002024 spin_unlock_irqrestore(&device_domain_lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002025 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002026 }
2027
2028found_domain:
2029 info = alloc_devinfo_mem();
2030 if (!info)
2031 goto error;
David Woodhouse276dbf992009-04-04 01:45:37 +01002032 info->segment = segment;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002033 info->bus = pdev->bus->number;
2034 info->devfn = pdev->devfn;
2035 info->dev = pdev;
2036 info->domain = domain;
2037 spin_lock_irqsave(&device_domain_lock, flags);
2038	/* somebody else was faster and set it already */
2039 found = find_domain(pdev);
2040 if (found != NULL) {
2041 spin_unlock_irqrestore(&device_domain_lock, flags);
2042 if (found != domain) {
2043 domain_exit(domain);
2044 domain = found;
2045 }
2046 free_devinfo_mem(info);
2047 return domain;
2048 }
2049 list_add(&info->link, &domain->devices);
2050 list_add(&info->global, &device_domain_list);
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002051 pdev->dev.archdata.iommu = info;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002052 spin_unlock_irqrestore(&device_domain_lock, flags);
2053 return domain;
2054error:
2055 /* recheck it here, maybe others set it */
2056 return find_domain(pdev);
2057}
2058
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002059static int iommu_identity_mapping;
David Woodhousee0fc7e02009-09-30 09:12:17 -07002060#define IDENTMAP_ALL 1
2061#define IDENTMAP_GFX 2
2062#define IDENTMAP_AZALIA 4
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002063
David Woodhouseb2132032009-06-26 18:50:28 +01002064static int iommu_domain_identity_map(struct dmar_domain *domain,
2065 unsigned long long start,
2066 unsigned long long end)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002067{
David Woodhousec5395d52009-06-28 16:35:56 +01002068 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2069 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002070
David Woodhousec5395d52009-06-28 16:35:56 +01002071 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2072 dma_to_mm_pfn(last_vpfn))) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002073 printk(KERN_ERR "IOMMU: reserve iova failed\n");
David Woodhouseb2132032009-06-26 18:50:28 +01002074 return -ENOMEM;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002075 }
2076
David Woodhousec5395d52009-06-28 16:35:56 +01002077 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2078 start, end, domain->id);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002079 /*
2080 * RMRR range might have overlap with physical memory range,
2081 * clear it first
2082 */
David Woodhousec5395d52009-06-28 16:35:56 +01002083 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002084
David Woodhousec5395d52009-06-28 16:35:56 +01002085 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2086 last_vpfn - first_vpfn + 1,
David Woodhouse61df7442009-06-28 11:55:58 +01002087 DMA_PTE_READ|DMA_PTE_WRITE);
David Woodhouseb2132032009-06-26 18:50:28 +01002088}
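/*
 * Example (addresses hypothetical): for an RMRR covering
 * 0xbf000000-0xbf1fffff, first_vpfn == 0xbf000 and last_vpfn ==
 * 0xbf1ff, so that 0x200-page range is reserved in the domain's IOVA
 * allocator and then mapped 1:1 (iov_pfn == phys_pfn) with read/write
 * permission.
 */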
2089
2090static int iommu_prepare_identity_map(struct pci_dev *pdev,
2091 unsigned long long start,
2092 unsigned long long end)
2093{
2094 struct dmar_domain *domain;
2095 int ret;
2096
David Woodhousec7ab48d2009-06-26 19:10:36 +01002097 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
David Woodhouseb2132032009-06-26 18:50:28 +01002098 if (!domain)
2099 return -ENOMEM;
2100
David Woodhouse19943b02009-08-04 16:19:20 +01002101 /* For _hardware_ passthrough, don't bother. But for software
2102 passthrough, we do it anyway -- it may indicate a memory
2103	   range which is reserved in E820 and so didn't get set
2104 up to start with in si_domain */
2105 if (domain == si_domain && hw_pass_through) {
2106 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2107 pci_name(pdev), start, end);
2108 return 0;
2109 }
2110
2111 printk(KERN_INFO
2112 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2113 pci_name(pdev), start, end);
David Woodhouse2ff729f2009-08-26 14:25:41 +01002114
David Woodhouse5595b522009-12-02 09:21:55 +00002115 if (end < start) {
2116 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2117 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2118 dmi_get_system_info(DMI_BIOS_VENDOR),
2119 dmi_get_system_info(DMI_BIOS_VERSION),
2120 dmi_get_system_info(DMI_PRODUCT_VERSION));
2121 ret = -EIO;
2122 goto error;
2123 }
2124
David Woodhouse2ff729f2009-08-26 14:25:41 +01002125 if (end >> agaw_to_width(domain->agaw)) {
2126 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2127 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2128 agaw_to_width(domain->agaw),
2129 dmi_get_system_info(DMI_BIOS_VENDOR),
2130 dmi_get_system_info(DMI_BIOS_VERSION),
2131 dmi_get_system_info(DMI_PRODUCT_VERSION));
2132 ret = -EIO;
2133 goto error;
2134 }
David Woodhouse19943b02009-08-04 16:19:20 +01002135
David Woodhouseb2132032009-06-26 18:50:28 +01002136 ret = iommu_domain_identity_map(domain, start, end);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002137 if (ret)
2138 goto error;
2139
2140 /* context entry init */
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002141 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
David Woodhouseb2132032009-06-26 18:50:28 +01002142 if (ret)
2143 goto error;
2144
2145 return 0;
2146
2147 error:
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002148 domain_exit(domain);
2149 return ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002150}
2151
2152static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2153 struct pci_dev *pdev)
2154{
Keshavamurthy, Anil S358dd8a2007-10-21 16:41:59 -07002155 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002156 return 0;
2157 return iommu_prepare_identity_map(pdev, rmrr->base_address,
David Woodhouse70e535d2011-05-31 00:22:52 +01002158 rmrr->end_address);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002159}
2160
Suresh Siddhad3f13812011-08-23 17:05:25 -07002161#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002162static inline void iommu_prepare_isa(void)
2163{
2164 struct pci_dev *pdev;
2165 int ret;
2166
2167 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2168 if (!pdev)
2169 return;
2170
David Woodhousec7ab48d2009-06-26 19:10:36 +01002171 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
David Woodhouse70e535d2011-05-31 00:22:52 +01002172 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002173
2174 if (ret)
David Woodhousec7ab48d2009-06-26 19:10:36 +01002175 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2176 "floppy might not work\n");
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002177
2178}
2179#else
2180static inline void iommu_prepare_isa(void)
2181{
2182 return;
2183}
Suresh Siddhad3f13812011-08-23 17:05:25 -07002184#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002185
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002186static int md_domain_init(struct dmar_domain *domain, int guest_width);
David Woodhousec7ab48d2009-06-26 19:10:36 +01002187
2188static int __init si_domain_work_fn(unsigned long start_pfn,
2189 unsigned long end_pfn, void *datax)
2190{
2191 int *ret = datax;
2192
2193 *ret = iommu_domain_identity_map(si_domain,
2194 (uint64_t)start_pfn << PAGE_SHIFT,
2195 (uint64_t)end_pfn << PAGE_SHIFT);
2196 return *ret;
2197
2198}
2199
Matt Kraai071e1372009-08-23 22:30:22 -07002200static int __init si_domain_init(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002201{
2202 struct dmar_drhd_unit *drhd;
2203 struct intel_iommu *iommu;
David Woodhousec7ab48d2009-06-26 19:10:36 +01002204 int nid, ret = 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002205
2206 si_domain = alloc_domain();
2207 if (!si_domain)
2208 return -EFAULT;
2209
David Woodhousec7ab48d2009-06-26 19:10:36 +01002210 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002211
2212 for_each_active_iommu(iommu, drhd) {
2213 ret = iommu_attach_domain(si_domain, iommu);
2214 if (ret) {
2215 domain_exit(si_domain);
2216 return -EFAULT;
2217 }
2218 }
2219
2220 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2221 domain_exit(si_domain);
2222 return -EFAULT;
2223 }
2224
2225 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2226
David Woodhouse19943b02009-08-04 16:19:20 +01002227 if (hw)
2228 return 0;
2229
David Woodhousec7ab48d2009-06-26 19:10:36 +01002230 for_each_online_node(nid) {
2231 work_with_active_regions(nid, si_domain_work_fn, &ret);
2232 if (ret)
2233 return ret;
2234 }
2235
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002236 return 0;
2237}
2238
2239static void domain_remove_one_dev_info(struct dmar_domain *domain,
2240 struct pci_dev *pdev);
2241static int identity_mapping(struct pci_dev *pdev)
2242{
2243 struct device_domain_info *info;
2244
2245 if (likely(!iommu_identity_mapping))
2246 return 0;
2247
Mike Traviscb452a42011-05-28 13:15:03 -05002248 info = pdev->dev.archdata.iommu;
2249 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2250 return (info->domain == si_domain);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002251
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002252 return 0;
2253}
2254
2255static int domain_add_dev_info(struct dmar_domain *domain,
David Woodhouse5fe60f42009-08-09 10:53:41 +01002256 struct pci_dev *pdev,
2257 int translation)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002258{
2259 struct device_domain_info *info;
2260 unsigned long flags;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002261 int ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002262
2263 info = alloc_devinfo_mem();
2264 if (!info)
2265 return -ENOMEM;
2266
David Woodhouse5fe60f42009-08-09 10:53:41 +01002267 ret = domain_context_mapping(domain, pdev, translation);
2268 if (ret) {
2269 free_devinfo_mem(info);
2270 return ret;
2271 }
2272
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002273 info->segment = pci_domain_nr(pdev->bus);
2274 info->bus = pdev->bus->number;
2275 info->devfn = pdev->devfn;
2276 info->dev = pdev;
2277 info->domain = domain;
2278
2279 spin_lock_irqsave(&device_domain_lock, flags);
2280 list_add(&info->link, &domain->devices);
2281 list_add(&info->global, &device_domain_list);
2282 pdev->dev.archdata.iommu = info;
2283 spin_unlock_irqrestore(&device_domain_lock, flags);
2284
2285 return 0;
2286}
2287
David Woodhouse6941af22009-07-04 18:24:27 +01002288static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2289{
David Woodhousee0fc7e02009-09-30 09:12:17 -07002290 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2291 return 1;
2292
2293 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2294 return 1;
2295
2296 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2297 return 0;
David Woodhouse6941af22009-07-04 18:24:27 +01002298
David Woodhouse3dfc8132009-07-04 19:11:08 +01002299 /*
2300 * We want to start off with all devices in the 1:1 domain, and
2301 * take them out later if we find they can't access all of memory.
2302 *
2303 * However, we can't do this for PCI devices behind bridges,
2304 * because all PCI devices behind the same bridge will end up
2305 * with the same source-id on their transactions.
2306 *
2307 * Practically speaking, we can't change things around for these
2308 * devices at run-time, because we can't be sure there'll be no
2309 * DMA transactions in flight for any of their siblings.
2310 *
2311 * So PCI devices (unless they're on the root bus) as well as
2312 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2313 * the 1:1 domain, just in _case_ one of their siblings turns out
2314 * not to be able to map all of memory.
2315 */
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002316 if (!pci_is_pcie(pdev)) {
David Woodhouse3dfc8132009-07-04 19:11:08 +01002317 if (!pci_is_root_bus(pdev->bus))
2318 return 0;
2319 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2320 return 0;
2321 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2322 return 0;
2323
2324 /*
2325 * At boot time, we don't yet know if devices will be 64-bit capable.
2326 * Assume that they will -- if they turn out not to be, then we can
2327 * take them out of the 1:1 domain later.
2328 */
Chris Wright8fcc5372011-05-28 13:15:02 -05002329 if (!startup) {
2330 /*
2331 * If the device's dma_mask is less than the system's memory
2332 * size then this is not a candidate for identity mapping.
2333 */
2334 u64 dma_mask = pdev->dma_mask;
2335
2336 if (pdev->dev.coherent_dma_mask &&
2337 pdev->dev.coherent_dma_mask < dma_mask)
2338 dma_mask = pdev->dev.coherent_dma_mask;
2339
2340 return dma_mask >= dma_get_required_mask(&pdev->dev);
2341 }
David Woodhouse6941af22009-07-04 18:24:27 +01002342
2343 return 1;
2344}
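/*
 * Illustrative example of the run-time branch above: on a box with
 * 8GiB of RAM, dma_get_required_mask() reports roughly a 33-bit mask,
 * so a device limited to DMA_BIT_MASK(32) fails the comparison and is
 * left out of (or later removed from) the 1:1 domain, while a 64-bit
 * capable device keeps its identity mapping.
 */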
2345
Matt Kraai071e1372009-08-23 22:30:22 -07002346static int __init iommu_prepare_static_identity_mapping(int hw)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002347{
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002348 struct pci_dev *pdev = NULL;
2349 int ret;
2350
David Woodhouse19943b02009-08-04 16:19:20 +01002351 ret = si_domain_init(hw);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002352 if (ret)
2353 return -EFAULT;
2354
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002355 for_each_pci_dev(pdev) {
Mike Travis825507d2011-05-28 13:15:06 -05002356 /* Skip Host/PCI Bridge devices */
2357 if (IS_BRIDGE_HOST_DEVICE(pdev))
2358 continue;
David Woodhouse6941af22009-07-04 18:24:27 +01002359 if (iommu_should_identity_map(pdev, 1)) {
David Woodhouse19943b02009-08-04 16:19:20 +01002360 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2361 hw ? "hardware" : "software", pci_name(pdev));
David Woodhousec7ab48d2009-06-26 19:10:36 +01002362
David Woodhouse5fe60f42009-08-09 10:53:41 +01002363 ret = domain_add_dev_info(si_domain, pdev,
David Woodhouse19943b02009-08-04 16:19:20 +01002364 hw ? CONTEXT_TT_PASS_THROUGH :
David Woodhouse62edf5d2009-07-04 10:59:46 +01002365 CONTEXT_TT_MULTI_LEVEL);
2366 if (ret)
2367 return ret;
David Woodhouse62edf5d2009-07-04 10:59:46 +01002368 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002369 }
2370
2371 return 0;
2372}
2373
Joseph Cihulab7792602011-05-03 00:08:37 -07002374static int __init init_dmars(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002375{
2376 struct dmar_drhd_unit *drhd;
2377 struct dmar_rmrr_unit *rmrr;
2378 struct pci_dev *pdev;
2379 struct intel_iommu *iommu;
Suresh Siddha9d783ba2009-03-16 17:04:55 -07002380 int i, ret;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002381
2382 /*
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002383 * for each drhd
2384 * allocate root
2385 * initialize and program root entry to not present
2386 * endfor
2387 */
2388 for_each_drhd_unit(drhd) {
mark gross5e0d2a62008-03-04 15:22:08 -08002389 g_num_of_iommus++;
2390 /*
2391 * lock not needed as this is only incremented in the single
2392 * threaded kernel __init code path all other access are read
2393 * only
2394 */
2395 }
2396
Weidong Hand9630fe2008-12-08 11:06:32 +08002397 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2398 GFP_KERNEL);
2399 if (!g_iommus) {
2400 printk(KERN_ERR "Allocating global iommu array failed\n");
2401 ret = -ENOMEM;
2402 goto error;
2403 }
2404
mark gross80b20dd2008-04-18 13:53:58 -07002405 deferred_flush = kzalloc(g_num_of_iommus *
2406 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2407 if (!deferred_flush) {
mark gross5e0d2a62008-03-04 15:22:08 -08002408 ret = -ENOMEM;
2409 goto error;
2410 }
2411
mark gross5e0d2a62008-03-04 15:22:08 -08002412 for_each_drhd_unit(drhd) {
2413 if (drhd->ignored)
2414 continue;
Suresh Siddha1886e8a2008-07-10 11:16:37 -07002415
2416 iommu = drhd->iommu;
Weidong Hand9630fe2008-12-08 11:06:32 +08002417 g_iommus[iommu->seq_id] = iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002418
Suresh Siddhae61d98d2008-07-10 11:16:35 -07002419 ret = iommu_init_domains(iommu);
2420 if (ret)
2421 goto error;
2422
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002423 /*
2424 * TBD:
2425 * we could share the same root & context tables
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002426		 * among all IOMMUs. Need to split it later.
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002427 */
2428 ret = iommu_alloc_root_entry(iommu);
2429 if (ret) {
2430 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2431 goto error;
2432 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002433 if (!ecap_pass_through(iommu->ecap))
David Woodhouse19943b02009-08-04 16:19:20 +01002434 hw_pass_through = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002435 }
2436
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002437 /*
2438 * Start from the sane iommu hardware state.
2439 */
Youquan Songa77b67d2008-10-16 16:31:56 -07002440 for_each_drhd_unit(drhd) {
2441 if (drhd->ignored)
2442 continue;
2443
2444 iommu = drhd->iommu;
Suresh Siddha1531a6a2009-03-16 17:04:57 -07002445
2446 /*
2447 * If the queued invalidation is already initialized by us
2448 * (for example, while enabling interrupt-remapping) then
2449 * we got the things already rolling from a sane state.
2450 */
2451 if (iommu->qi)
2452 continue;
2453
2454 /*
2455 * Clear any previous faults.
2456 */
2457 dmar_fault(-1, iommu);
2458 /*
2459 * Disable queued invalidation if supported and already enabled
2460 * before OS handover.
2461 */
2462 dmar_disable_qi(iommu);
2463 }
2464
2465 for_each_drhd_unit(drhd) {
2466 if (drhd->ignored)
2467 continue;
2468
2469 iommu = drhd->iommu;
2470
Youquan Songa77b67d2008-10-16 16:31:56 -07002471 if (dmar_enable_qi(iommu)) {
2472 /*
2473 * Queued Invalidate not enabled, use Register Based
2474 * Invalidate
2475 */
2476 iommu->flush.flush_context = __iommu_flush_context;
2477 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002478 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002479 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002480 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002481 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002482 } else {
2483 iommu->flush.flush_context = qi_flush_context;
2484 iommu->flush.flush_iotlb = qi_flush_iotlb;
Yinghai Lu680a7522010-04-08 19:58:23 +01002485 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002486 "invalidation\n",
Yinghai Lu680a7522010-04-08 19:58:23 +01002487 iommu->seq_id,
FUJITA Tomonorib4e0f9e2008-11-19 13:53:42 +09002488 (unsigned long long)drhd->reg_base_addr);
Youquan Songa77b67d2008-10-16 16:31:56 -07002489 }
2490 }
2491
David Woodhouse19943b02009-08-04 16:19:20 +01002492 if (iommu_pass_through)
David Woodhousee0fc7e02009-09-30 09:12:17 -07002493 iommu_identity_mapping |= IDENTMAP_ALL;
2494
Suresh Siddhad3f13812011-08-23 17:05:25 -07002495#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
David Woodhousee0fc7e02009-09-30 09:12:17 -07002496 iommu_identity_mapping |= IDENTMAP_GFX;
David Woodhouse19943b02009-08-04 16:19:20 +01002497#endif
David Woodhousee0fc7e02009-09-30 09:12:17 -07002498
2499 check_tylersburg_isoch();
2500
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002501 /*
2502 * If pass through is not set or not enabled, setup context entries for
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002503 * identity mappings for rmrr, gfx, and isa and may fall back to static
2504 * identity mapping if iommu_identity_mapping is set.
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002505 */
David Woodhouse19943b02009-08-04 16:19:20 +01002506 if (iommu_identity_mapping) {
2507 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2508 if (ret) {
2509 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2510 goto error;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002511 }
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002512 }
David Woodhouse19943b02009-08-04 16:19:20 +01002513 /*
2514 * For each rmrr
2515 * for each dev attached to rmrr
2516 * do
2517 * locate drhd for dev, alloc domain for dev
2518 * allocate free domain
2519 * allocate page table entries for rmrr
2520 * if context not allocated for bus
2521 * allocate and init context
2522 * set present in root table for this bus
2523 * init context with domain, translation etc
2524 * endfor
2525 * endfor
2526 */
2527 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2528 for_each_rmrr_units(rmrr) {
2529 for (i = 0; i < rmrr->devices_cnt; i++) {
2530 pdev = rmrr->devices[i];
2531 /*
2532			 * some BIOSes list non-existent devices in the
2533			 * DMAR table.
2534 */
2535 if (!pdev)
2536 continue;
2537 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2538 if (ret)
2539 printk(KERN_ERR
2540 "IOMMU: mapping reserved region failed\n");
2541 }
2542 }
2543
2544 iommu_prepare_isa();
Keshavamurthy, Anil S49a04292007-10-21 16:41:57 -07002545
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002546 /*
2547 * for each drhd
2548 * enable fault log
2549 * global invalidate context cache
2550 * global invalidate iotlb
2551 * enable translation
2552 */
2553 for_each_drhd_unit(drhd) {
Joseph Cihula51a63e62011-03-21 11:04:24 -07002554 if (drhd->ignored) {
2555 /*
2556 * we always have to disable PMRs or DMA may fail on
2557 * this device
2558 */
2559 if (force_on)
2560 iommu_disable_protect_mem_regions(drhd->iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002561 continue;
Joseph Cihula51a63e62011-03-21 11:04:24 -07002562 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002563 iommu = drhd->iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002564
2565 iommu_flush_write_buffer(iommu);
2566
Keshavamurthy, Anil S3460a6d2007-10-21 16:41:54 -07002567 ret = dmar_set_interrupt(iommu);
2568 if (ret)
2569 goto error;
2570
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002571 iommu_set_root_entry(iommu);
2572
David Woodhouse4c25a2c2009-05-10 17:16:06 +01002573 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002574 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
mark grossf8bab732008-02-08 04:18:38 -08002575
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002576 ret = iommu_enable_translation(iommu);
2577 if (ret)
2578 goto error;
David Woodhouseb94996c2009-09-19 15:28:12 -07002579
2580 iommu_disable_protect_mem_regions(iommu);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002581 }
2582
2583 return 0;
2584error:
2585 for_each_drhd_unit(drhd) {
2586 if (drhd->ignored)
2587 continue;
2588 iommu = drhd->iommu;
2589 free_iommu(iommu);
2590 }
Weidong Hand9630fe2008-12-08 11:06:32 +08002591 kfree(g_iommus);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002592 return ret;
2593}
2594
David Woodhouse5a5e02a2009-07-04 09:35:44 +01002595/* This takes a number of _MM_ pages, not VTD pages */
David Woodhouse875764d2009-06-28 21:20:51 +01002596static struct iova *intel_alloc_iova(struct device *dev,
2597 struct dmar_domain *domain,
2598 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002599{
2600 struct pci_dev *pdev = to_pci_dev(dev);
2601 struct iova *iova = NULL;
2602
David Woodhouse875764d2009-06-28 21:20:51 +01002603 /* Restrict dma_mask to the width that the iommu can handle */
2604 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2605
2606 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002607 /*
2608		 * First try to allocate an I/O virtual address below
Yang Hongyang284901a2009-04-06 19:01:15 -07002609		 * DMA_BIT_MASK(32); if that fails, then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002610		 * from the higher range.
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002611 */
David Woodhouse875764d2009-06-28 21:20:51 +01002612 iova = alloc_iova(&domain->iovad, nrpages,
2613 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2614 if (iova)
2615 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002616 }
David Woodhouse875764d2009-06-28 21:20:51 +01002617 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2618 if (unlikely(!iova)) {
2619		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2620 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002621 return NULL;
2622 }
2623
2624 return iova;
2625}
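/*
 * Illustrative example: for a 64-bit capable device (without
 * "forcedac" on the kernel command line), the first alloc_iova() call
 * tries to place the mapping below 4GiB; only if that range is
 * exhausted does the second call allocate from the full dma_mask,
 * avoiding dual-address-cycle addresses unless they are needed.
 */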
2626
David Woodhouse147202a2009-07-07 19:43:20 +01002627static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002628{
2629 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002630 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002631
2632 domain = get_domain_for_dev(pdev,
2633 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2634 if (!domain) {
2635 printk(KERN_ERR
2636 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002637 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002638 }
2639
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002640 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002641 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002642 ret = domain_context_mapping(domain, pdev,
2643 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002644 if (ret) {
2645 printk(KERN_ERR
2646 "Domain context map for %s failed",
2647 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002648 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002649 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002650 }
2651
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002652 return domain;
2653}
2654
David Woodhouse147202a2009-07-07 19:43:20 +01002655static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2656{
2657 struct device_domain_info *info;
2658
2659 /* No lock here, assumes no domain exit in normal case */
2660 info = dev->dev.archdata.iommu;
2661 if (likely(info))
2662 return info->domain;
2663
2664 return __get_valid_domain_for_dev(dev);
2665}
2666
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002667static int iommu_dummy(struct pci_dev *pdev)
2668{
2669 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2670}
2671
2672/* Check if the pdev needs to go through the non-identity map/unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002673static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002674{
David Woodhouse73676832009-07-04 14:08:36 +01002675 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002676 int found;
2677
David Woodhouse73676832009-07-04 14:08:36 +01002678 if (unlikely(dev->bus != &pci_bus_type))
2679 return 1;
2680
2681 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002682 if (iommu_dummy(pdev))
2683 return 1;
2684
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002685 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002686 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002687
2688 found = identity_mapping(pdev);
2689 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002690 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002691 return 1;
2692 else {
2693 /*
 2694			 * The 32 bit DMA device is removed from si_domain and
 2695			 * falls back to non-identity mapping.
2696 */
2697 domain_remove_one_dev_info(si_domain, pdev);
2698 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2699 pci_name(pdev));
2700 return 0;
2701 }
2702 } else {
2703 /*
 2704		 * If a 64 bit DMA device is detached from a VM, the device
 2705		 * is put back into si_domain for identity mapping.
2706 */
David Woodhouse6941af22009-07-04 18:24:27 +01002707 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002708 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002709 ret = domain_add_dev_info(si_domain, pdev,
2710 hw_pass_through ?
2711 CONTEXT_TT_PASS_THROUGH :
2712 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002713 if (!ret) {
2714 printk(KERN_INFO "64bit %s uses identity mapping\n",
2715 pci_name(pdev));
2716 return 1;
2717 }
2718 }
2719 }
2720
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002721 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002722}
2723
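/*
 * Core mapping routine behind the streaming-DMA entry points below:
 * identity-map check, domain lookup, IOVA allocation, page-table
 * population, then an IOTLB flush (caching mode) or a write-buffer
 * flush.  A driver reaches this path only indirectly, through the
 * generic DMA API -- a minimal sketch, not part of this file:
 *
 *	dma_addr_t dma = dma_map_single(&pdev->dev, buf, len,
 *					DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 */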
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002724static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2725 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002726{
2727 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002728 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002729 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002730 struct iova *iova;
2731 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002732 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002733 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002734 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002735
2736 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002737
David Woodhouse73676832009-07-04 14:08:36 +01002738 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002739 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002740
2741 domain = get_valid_domain_for_dev(pdev);
2742 if (!domain)
2743 return 0;
2744
Weidong Han8c11e792008-12-08 15:29:22 +08002745 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002746 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002747
Mike Travisc681d0b2011-05-28 13:15:05 -05002748 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002749 if (!iova)
2750 goto error;
2751
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002752 /*
 2753	 * Check if DMAR supports zero-length reads on write-only
 2754	 * mappings.
2755 */
2756 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002757 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002758 prot |= DMA_PTE_READ;
2759 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2760 prot |= DMA_PTE_WRITE;
2761 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002762	 * paddr - (paddr + size) might span a partial page; we should map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002763	 * whole page. Note: if two parts of one page are mapped separately, we
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002764	 * might have two guest_addr mappings to the same host paddr, but this
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002765	 * is not a big problem
2766 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002767 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002768 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002769 if (ret)
2770 goto error;
2771
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002772 /* it's a non-present to present mapping. Only flush if caching mode */
2773 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002774 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002775 else
Weidong Han8c11e792008-12-08 15:29:22 +08002776 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002777
David Woodhouse03d6a242009-06-28 15:33:46 +01002778 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2779 start_paddr += paddr & ~PAGE_MASK;
2780 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002781
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002782error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002783 if (iova)
2784 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002785	printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002786 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002787 return 0;
2788}
2789
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002790static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2791 unsigned long offset, size_t size,
2792 enum dma_data_direction dir,
2793 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002794{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002795 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2796 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002797}
2798
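/*
 * Drain the per-IOMMU deferred-unmap queues; called with
 * async_umap_flush_lock held.  On real hardware one global IOTLB
 * flush per IOMMU is cheapest; under caching mode (i.e. when running
 * virtualized) per-range page-selective flushes are used instead,
 * since global flushes are expensive to emulate.  Only after the
 * flush are the queued IOVAs returned to the allocator.
 */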
mark gross5e0d2a62008-03-04 15:22:08 -08002799static void flush_unmaps(void)
2800{
mark gross80b20dd2008-04-18 13:53:58 -07002801 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002802
mark gross5e0d2a62008-03-04 15:22:08 -08002803 timer_on = 0;
2804
2805 /* just flush them all */
2806 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002807 struct intel_iommu *iommu = g_iommus[i];
2808 if (!iommu)
2809 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002810
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002811 if (!deferred_flush[i].next)
2812 continue;
2813
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002814		/* In caching mode, global flushes make emulation expensive */
2815 if (!cap_caching_mode(iommu->cap))
2816 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002817 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002818 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002819 unsigned long mask;
2820 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002821 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002822
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002823			/* On real hardware, multiple invalidations are expensive */
2824 if (cap_caching_mode(iommu->cap))
2825 iommu_flush_iotlb_psi(iommu, domain->id,
2826 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2827 else {
2828 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2829 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2830 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2831 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002832 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002833 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002834 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002835 }
2836
mark gross5e0d2a62008-03-04 15:22:08 -08002837 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002838}
2839
2840static void flush_unmaps_timeout(unsigned long data)
2841{
mark gross80b20dd2008-04-18 13:53:58 -07002842 unsigned long flags;
2843
2844 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002845 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002846 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002847}
2848
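/*
 * Queue an IOVA range for deferred freeing on the list of the IOMMU
 * that owns @dom.  The queue drains when it reaches HIGH_WATER_MARK
 * or when the 10ms unmap_timer fires, trading a little unmap latency
 * for far fewer IOTLB invalidations (booting with intel_iommu=strict
 * selects the flush-on-every-unmap behaviour instead).
 */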
2849static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2850{
2851 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002852 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002853 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002854
2855 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002856 if (list_size == HIGH_WATER_MARK)
2857 flush_unmaps();
2858
Weidong Han8c11e792008-12-08 15:29:22 +08002859 iommu = domain_get_iommu(dom);
2860 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002861
mark gross80b20dd2008-04-18 13:53:58 -07002862 next = deferred_flush[iommu_id].next;
2863 deferred_flush[iommu_id].domain[next] = dom;
2864 deferred_flush[iommu_id].iova[next] = iova;
2865 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002866
2867 if (!timer_on) {
2868 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2869 timer_on = 1;
2870 }
2871 list_size++;
2872 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2873}
2874
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002875static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2876 size_t size, enum dma_data_direction dir,
2877 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002878{
2879 struct pci_dev *pdev = to_pci_dev(dev);
2880 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002881 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002882 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002883 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002884
David Woodhouse73676832009-07-04 14:08:36 +01002885 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002886 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002887
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002888 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002889 BUG_ON(!domain);
2890
Weidong Han8c11e792008-12-08 15:29:22 +08002891 iommu = domain_get_iommu(domain);
2892
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002893 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002894 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2895 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002896 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002897
David Woodhoused794dc92009-06-28 00:27:49 +01002898 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2899 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002900
David Woodhoused794dc92009-06-28 00:27:49 +01002901 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2902 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002903
2904 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002905 dma_pte_clear_range(domain, start_pfn, last_pfn);
2906
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002907 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002908 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2909
mark gross5e0d2a62008-03-04 15:22:08 -08002910 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002911 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002912 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002913 /* free iova */
2914 __free_iova(&domain->iovad, iova);
2915 } else {
2916 add_unmap(domain, iova);
2917 /*
 2918		 * Queue up the release of the unmap to save the ~1/6th of
 2919		 * the CPU time used up by the iotlb flush operation...
2920 */
mark gross5e0d2a62008-03-04 15:22:08 -08002921 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002922}
2923
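/*
 * dma_alloc_coherent() backend: allocate zeroed pages and map them
 * bidirectionally under the device's coherent DMA mask.  The GFP_DMA
 * zone restrictions are only kept when the device bypasses
 * translation.  Typical driver-side usage -- a sketch, not part of
 * this file:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, size, &dma,
 *				       GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, size, cpu, dma);
 */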
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002924static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2925 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002926{
2927 void *vaddr;
2928 int order;
2929
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002930 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002931 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002932
2933 if (!iommu_no_mapping(hwdev))
2934 flags &= ~(GFP_DMA | GFP_DMA32);
2935 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2936 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2937 flags |= GFP_DMA;
2938 else
2939 flags |= GFP_DMA32;
2940 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002941
2942 vaddr = (void *)__get_free_pages(flags, order);
2943 if (!vaddr)
2944 return NULL;
2945 memset(vaddr, 0, size);
2946
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002947 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2948 DMA_BIDIRECTIONAL,
2949 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002950 if (*dma_handle)
2951 return vaddr;
2952 free_pages((unsigned long)vaddr, order);
2953 return NULL;
2954}
2955
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002956static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2957 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002958{
2959 int order;
2960
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002961 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002962 order = get_order(size);
2963
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002964 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002965 free_pages((unsigned long)vaddr, order);
2966}
2967
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002968static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2969 int nelems, enum dma_data_direction dir,
2970 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002971{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002972 struct pci_dev *pdev = to_pci_dev(hwdev);
2973 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002974 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002975 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002976 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002977
David Woodhouse73676832009-07-04 14:08:36 +01002978 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002979 return;
2980
2981 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002982 BUG_ON(!domain);
2983
2984 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002985
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002986 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002987 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2988 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002989 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002990
David Woodhoused794dc92009-06-28 00:27:49 +01002991 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2992 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002993
2994 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002995 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002996
David Woodhoused794dc92009-06-28 00:27:49 +01002997 /* free page tables */
2998 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2999
David Woodhouseacea0012009-07-14 01:55:11 +01003000 if (intel_iommu_strict) {
3001 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003002 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003003 /* free iova */
3004 __free_iova(&domain->iovad, iova);
3005 } else {
3006 add_unmap(domain, iova);
3007 /*
 3008		 * Queue up the release of the unmap to save the ~1/6th of
 3009		 * the CPU time used up by the iotlb flush operation...
3010 */
3011 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003012}
3013
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003014static int intel_nontranslate_map_sg(struct device *hwdev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003015 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003016{
3017 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003018 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003019
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003020 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003021 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003022 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003023 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003024 }
3025 return nelems;
3026}
3027
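/*
 * dma_map_sg() backend: one IOVA range is allocated to cover the
 * whole scatterlist, and the entries are mapped contiguously in DMA
 * address space.  Hypothetical driver-side usage, where program_hw()
 * stands in for real device programming:
 *
 *	struct scatterlist *sg;
 *	int i, n;
 *
 *	n = dma_map_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sglist, sg, n, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 */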
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003028static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3029 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003030{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003031 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003032 struct pci_dev *pdev = to_pci_dev(hwdev);
3033 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003034 size_t size = 0;
3035 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003036 struct iova *iova = NULL;
3037 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003038 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003039 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003040 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003041
3042 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003043 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003044 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003045
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003046 domain = get_valid_domain_for_dev(pdev);
3047 if (!domain)
3048 return 0;
3049
Weidong Han8c11e792008-12-08 15:29:22 +08003050 iommu = domain_get_iommu(domain);
3051
David Woodhouseb536d242009-06-28 14:49:31 +01003052 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003053 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003054
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003055 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3056 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003057 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003058 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003059 return 0;
3060 }
3061
3062 /*
 3063	 * Check if DMAR supports zero-length reads on write-only
 3064	 * mappings.
3065 */
3066 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003067 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003068 prot |= DMA_PTE_READ;
3069 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3070 prot |= DMA_PTE_WRITE;
3071
David Woodhouseb536d242009-06-28 14:49:31 +01003072 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003073
Fenghua Yuf5329592009-08-04 15:09:37 -07003074 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003075 if (unlikely(ret)) {
3076 /* clear the page */
3077 dma_pte_clear_range(domain, start_vpfn,
3078 start_vpfn + size - 1);
3079 /* free page tables */
3080 dma_pte_free_pagetable(domain, start_vpfn,
3081 start_vpfn + size - 1);
3082 /* free iova */
3083 __free_iova(&domain->iovad, iova);
3084 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003085 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003086
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003087 /* it's a non-present to present mapping. Only flush if caching mode */
3088 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003089 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003090 else
Weidong Han8c11e792008-12-08 15:29:22 +08003091 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003092
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003093 return nelems;
3094}
3095
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003096static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3097{
3098 return !dma_addr;
3099}
3100
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003101struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003102 .alloc_coherent = intel_alloc_coherent,
3103 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003104 .map_sg = intel_map_sg,
3105 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003106 .map_page = intel_map_page,
3107 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003108 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003109};
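
/*
 * intel_iommu_init() installs this structure as the global dma_ops,
 * so once VT-d translation is enabled every dma_map_ops call in the
 * system is dispatched to the functions above.
 */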
3110
3111static inline int iommu_domain_cache_init(void)
3112{
3113 int ret = 0;
3114
3115 iommu_domain_cache = kmem_cache_create("iommu_domain",
3116 sizeof(struct dmar_domain),
3117 0,
3118 SLAB_HWCACHE_ALIGN,
3120 NULL);
3121 if (!iommu_domain_cache) {
3122 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3123 ret = -ENOMEM;
3124 }
3125
3126 return ret;
3127}
3128
3129static inline int iommu_devinfo_cache_init(void)
3130{
3131 int ret = 0;
3132
3133 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3134 sizeof(struct device_domain_info),
3135 0,
3136 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003137 NULL);
3138 if (!iommu_devinfo_cache) {
3139 printk(KERN_ERR "Couldn't create devinfo cache\n");
3140 ret = -ENOMEM;
3141 }
3142
3143 return ret;
3144}
3145
3146static inline int iommu_iova_cache_init(void)
3147{
3148 int ret = 0;
3149
3150 iommu_iova_cache = kmem_cache_create("iommu_iova",
3151 sizeof(struct iova),
3152 0,
3153 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003154 NULL);
3155 if (!iommu_iova_cache) {
3156 printk(KERN_ERR "Couldn't create iova cache\n");
3157 ret = -ENOMEM;
3158 }
3159
3160 return ret;
3161}
3162
3163static int __init iommu_init_mempool(void)
3164{
3165 int ret;
3166 ret = iommu_iova_cache_init();
3167 if (ret)
3168 return ret;
3169
3170 ret = iommu_domain_cache_init();
3171 if (ret)
3172 goto domain_error;
3173
3174 ret = iommu_devinfo_cache_init();
3175 if (!ret)
3176 return ret;
3177
3178 kmem_cache_destroy(iommu_domain_cache);
3179domain_error:
3180 kmem_cache_destroy(iommu_iova_cache);
3181
3182 return -ENOMEM;
3183}
3184
3185static void __init iommu_exit_mempool(void)
3186{
3187 kmem_cache_destroy(iommu_devinfo_cache);
3188 kmem_cache_destroy(iommu_domain_cache);
3189 kmem_cache_destroy(iommu_iova_cache);
3190
3191}
3192
Dan Williams556ab452010-07-23 15:47:56 -07003193static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3194{
3195 struct dmar_drhd_unit *drhd;
3196 u32 vtbar;
3197 int rc;
3198
3199 /* We know that this device on this chipset has its own IOMMU.
3200 * If we find it under a different IOMMU, then the BIOS is lying
3201 * to us. Hope that the IOMMU for this device is actually
3202 * disabled, and it needs no translation...
3203 */
3204 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3205 if (rc) {
3206 /* "can't" happen */
3207 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3208 return;
3209 }
3210 vtbar &= 0xffff0000;
3211
 3212	/* we know that this iommu should be at offset 0xa000 from vtbar */
3213 drhd = dmar_find_matched_drhd_unit(pdev);
3214 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3215 TAINT_FIRMWARE_WORKAROUND,
3216 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3217 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3218}
3219DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3220
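/*
 * Two passes over the DMAR units: first, units whose device scope
 * contains no PCI devices at all are marked ignored; second, units
 * that cover only graphics devices are either kept (setting
 * intel_iommu_gfx_mapped) or bypassed entirely when dmar_map_gfx is
 * clear.
 */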
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003221static void __init init_no_remapping_devices(void)
3222{
3223 struct dmar_drhd_unit *drhd;
3224
3225 for_each_drhd_unit(drhd) {
3226 if (!drhd->include_all) {
3227 int i;
3228 for (i = 0; i < drhd->devices_cnt; i++)
3229 if (drhd->devices[i] != NULL)
3230 break;
3231 /* ignore DMAR unit if no pci devices exist */
3232 if (i == drhd->devices_cnt)
3233 drhd->ignored = 1;
3234 }
3235 }
3236
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003237 for_each_drhd_unit(drhd) {
3238 int i;
3239 if (drhd->ignored || drhd->include_all)
3240 continue;
3241
3242 for (i = 0; i < drhd->devices_cnt; i++)
3243 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003244 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003245 break;
3246
3247 if (i < drhd->devices_cnt)
3248 continue;
3249
David Woodhousec0771df2011-10-14 20:59:46 +01003250 /* This IOMMU has *only* gfx devices. Either bypass it or
3251 set the gfx_mapped flag, as appropriate */
3252 if (dmar_map_gfx) {
3253 intel_iommu_gfx_mapped = 1;
3254 } else {
3255 drhd->ignored = 1;
3256 for (i = 0; i < drhd->devices_cnt; i++) {
3257 if (!drhd->devices[i])
3258 continue;
3259 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3260 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003261 }
3262 }
3263}
3264
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003265#ifdef CONFIG_SUSPEND
3266static int init_iommu_hw(void)
3267{
3268 struct dmar_drhd_unit *drhd;
3269 struct intel_iommu *iommu = NULL;
3270
3271 for_each_active_iommu(iommu, drhd)
3272 if (iommu->qi)
3273 dmar_reenable_qi(iommu);
3274
Joseph Cihulab7792602011-05-03 00:08:37 -07003275 for_each_iommu(iommu, drhd) {
3276 if (drhd->ignored) {
3277 /*
3278 * we always have to disable PMRs or DMA may fail on
3279 * this device
3280 */
3281 if (force_on)
3282 iommu_disable_protect_mem_regions(iommu);
3283 continue;
3284 }
3285
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003286 iommu_flush_write_buffer(iommu);
3287
3288 iommu_set_root_entry(iommu);
3289
3290 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003291 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003292 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003293 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003294 if (iommu_enable_translation(iommu))
3295 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003296 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003297 }
3298
3299 return 0;
3300}
3301
3302static void iommu_flush_all(void)
3303{
3304 struct dmar_drhd_unit *drhd;
3305 struct intel_iommu *iommu;
3306
3307 for_each_active_iommu(iommu, drhd) {
3308 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003309 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003310 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003311 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003312 }
3313}
3314
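/*
 * Suspend path: snapshot the fault-event control/data/address
 * registers of every active IOMMU and disable translation; the
 * resume path below re-initializes the hardware via init_iommu_hw()
 * and writes the saved register state back.
 */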
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003315static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003316{
3317 struct dmar_drhd_unit *drhd;
3318 struct intel_iommu *iommu = NULL;
3319 unsigned long flag;
3320
3321 for_each_active_iommu(iommu, drhd) {
3322 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3323 GFP_ATOMIC);
3324 if (!iommu->iommu_state)
3325 goto nomem;
3326 }
3327
3328 iommu_flush_all();
3329
3330 for_each_active_iommu(iommu, drhd) {
3331 iommu_disable_translation(iommu);
3332
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003333 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003334
3335 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3336 readl(iommu->reg + DMAR_FECTL_REG);
3337 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3338 readl(iommu->reg + DMAR_FEDATA_REG);
3339 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3340 readl(iommu->reg + DMAR_FEADDR_REG);
3341 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3342 readl(iommu->reg + DMAR_FEUADDR_REG);
3343
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003344 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003345 }
3346 return 0;
3347
3348nomem:
3349 for_each_active_iommu(iommu, drhd)
3350 kfree(iommu->iommu_state);
3351
3352 return -ENOMEM;
3353}
3354
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003355static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003356{
3357 struct dmar_drhd_unit *drhd;
3358 struct intel_iommu *iommu = NULL;
3359 unsigned long flag;
3360
3361 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003362 if (force_on)
3363 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3364 else
3365 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003366 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003367 }
3368
3369 for_each_active_iommu(iommu, drhd) {
3370
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003371 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003372
3373 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3374 iommu->reg + DMAR_FECTL_REG);
3375 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3376 iommu->reg + DMAR_FEDATA_REG);
3377 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3378 iommu->reg + DMAR_FEADDR_REG);
3379 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3380 iommu->reg + DMAR_FEUADDR_REG);
3381
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003382 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003383 }
3384
3385 for_each_active_iommu(iommu, drhd)
3386 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003387}
3388
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003389static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003390 .resume = iommu_resume,
3391 .suspend = iommu_suspend,
3392};
3393
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003394static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003395{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003396 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003397}
3398
3399#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003400static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003401#endif /* CONFIG_PM */
3402
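/*
 * RMRR (Reserved Memory Region Reporting) structures describe
 * BIOS-owned DMA ranges that must remain mapped for specific devices
 * (typically USB controllers or integrated graphics), while ATSR
 * structures list the root ports below which Address Translation
 * Services may be enabled.  Both are parsed from the ACPI DMAR table
 * into the lists below.
 */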
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003403LIST_HEAD(dmar_rmrr_units);
3404
3405static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3406{
3407 list_add(&rmrr->list, &dmar_rmrr_units);
3408}
3409
3410
3411int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3412{
3413 struct acpi_dmar_reserved_memory *rmrr;
3414 struct dmar_rmrr_unit *rmrru;
3415
3416 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3417 if (!rmrru)
3418 return -ENOMEM;
3419
3420 rmrru->hdr = header;
3421 rmrr = (struct acpi_dmar_reserved_memory *)header;
3422 rmrru->base_address = rmrr->base_address;
3423 rmrru->end_address = rmrr->end_address;
3424
3425 dmar_register_rmrr_unit(rmrru);
3426 return 0;
3427}
3428
3429static int __init
3430rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3431{
3432 struct acpi_dmar_reserved_memory *rmrr;
3433 int ret;
3434
3435 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3436 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3437 ((void *)rmrr) + rmrr->header.length,
3438 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3439
3440 if (ret || (rmrru->devices_cnt == 0)) {
3441 list_del(&rmrru->list);
3442 kfree(rmrru);
3443 }
3444 return ret;
3445}
3446
3447static LIST_HEAD(dmar_atsr_units);
3448
3449int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3450{
3451 struct acpi_dmar_atsr *atsr;
3452 struct dmar_atsr_unit *atsru;
3453
3454 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3455 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3456 if (!atsru)
3457 return -ENOMEM;
3458
3459 atsru->hdr = hdr;
3460 atsru->include_all = atsr->flags & 0x1;
3461
3462 list_add(&atsru->list, &dmar_atsr_units);
3463
3464 return 0;
3465}
3466
3467static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3468{
3469 int rc;
3470 struct acpi_dmar_atsr *atsr;
3471
3472 if (atsru->include_all)
3473 return 0;
3474
3475 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3476 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3477 (void *)atsr + atsr->header.length,
3478 &atsru->devices_cnt, &atsru->devices,
3479 atsr->segment);
3480 if (rc || !atsru->devices_cnt) {
3481 list_del(&atsru->list);
3482 kfree(atsru);
3483 }
3484
3485 return rc;
3486}
3487
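/*
 * Decide whether ATS may be used for @dev: find the ATSR that matches
 * the device's PCI segment, then walk up the bus hierarchy.  The
 * device qualifies if its root port appears in the ATSR device scope,
 * or unconditionally if the ATSR covers all ports (include_all).
 */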
3488int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3489{
3490 int i;
3491 struct pci_bus *bus;
3492 struct acpi_dmar_atsr *atsr;
3493 struct dmar_atsr_unit *atsru;
3494
3495 dev = pci_physfn(dev);
3496
3497 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3498 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3499 if (atsr->segment == pci_domain_nr(dev->bus))
3500 goto found;
3501 }
3502
3503 return 0;
3504
3505found:
3506 for (bus = dev->bus; bus; bus = bus->parent) {
3507 struct pci_dev *bridge = bus->self;
3508
3509 if (!bridge || !pci_is_pcie(bridge) ||
3510 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3511 return 0;
3512
3513 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3514 for (i = 0; i < atsru->devices_cnt; i++)
3515 if (atsru->devices[i] == bridge)
3516 return 1;
3517 break;
3518 }
3519 }
3520
3521 if (atsru->include_all)
3522 return 1;
3523
3524 return 0;
3525}
3526
3527int dmar_parse_rmrr_atsr_dev(void)
3528{
3529 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3530 struct dmar_atsr_unit *atsr, *atsr_n;
3531 int ret = 0;
3532
3533 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3534 ret = rmrr_parse_dev(rmrr);
3535 if (ret)
3536 return ret;
3537 }
3538
3539 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3540 ret = atsr_parse_dev(atsr);
3541 if (ret)
3542 return ret;
3543 }
3544
3545 return ret;
3546}
3547
Fenghua Yu99dcade2009-11-11 07:23:06 -08003548/*
 3549 * Here we only respond to a device being unbound from its driver.
 3550 *
 3551 * A newly added device is not attached to its DMAR domain here yet. That will
 3552 * happen when the device is mapped to an iova.
3553 */
3554static int device_notifier(struct notifier_block *nb,
3555 unsigned long action, void *data)
3556{
3557 struct device *dev = data;
3558 struct pci_dev *pdev = to_pci_dev(dev);
3559 struct dmar_domain *domain;
3560
David Woodhouse44cd6132009-12-02 10:18:30 +00003561 if (iommu_no_mapping(dev))
3562 return 0;
3563
Fenghua Yu99dcade2009-11-11 07:23:06 -08003564 domain = find_domain(pdev);
3565 if (!domain)
3566 return 0;
3567
Alex Williamsona97590e2011-03-04 14:52:16 -07003568 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003569 domain_remove_one_dev_info(domain, pdev);
3570
Alex Williamsona97590e2011-03-04 14:52:16 -07003571 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3572 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3573 list_empty(&domain->devices))
3574 domain_exit(domain);
3575 }
3576
Fenghua Yu99dcade2009-11-11 07:23:06 -08003577 return 0;
3578}
3579
3580static struct notifier_block device_nb = {
3581 .notifier_call = device_notifier,
3582};
3583
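/*
 * VT-d initialization entry point, reached through the arch IOMMU
 * init hook.  Ordering matters: parse the DMAR table, set up the
 * mempools and reserved IOVA ranges, initialize the DMAR units in
 * init_dmars(), and only then switch the system over to
 * intel_dma_ops and register the IOMMU-API ops and bus notifier.
 */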
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003584int __init intel_iommu_init(void)
3585{
3586 int ret = 0;
3587
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003588 /* VT-d is required for a TXT/tboot launch, so enforce that */
3589 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003590
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003591 if (dmar_table_init()) {
3592 if (force_on)
3593 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003594 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003595 }
3596
Suresh Siddhac2c72862011-08-23 17:05:19 -07003597 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003598 if (force_on)
3599 panic("tboot: Failed to initialize DMAR device scope\n");
3600 return -ENODEV;
3601 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003602
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003603 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003604 return -ENODEV;
3605
Joseph Cihula51a63e62011-03-21 11:04:24 -07003606 if (iommu_init_mempool()) {
3607 if (force_on)
3608 panic("tboot: Failed to initialize iommu memory\n");
3609 return -ENODEV;
3610 }
3611
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003612 if (list_empty(&dmar_rmrr_units))
3613 printk(KERN_INFO "DMAR: No RMRR found\n");
3614
3615 if (list_empty(&dmar_atsr_units))
3616 printk(KERN_INFO "DMAR: No ATSR found\n");
3617
Joseph Cihula51a63e62011-03-21 11:04:24 -07003618 if (dmar_init_reserved_ranges()) {
3619 if (force_on)
3620 panic("tboot: Failed to reserve iommu ranges\n");
3621 return -ENODEV;
3622 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003623
3624 init_no_remapping_devices();
3625
Joseph Cihulab7792602011-05-03 00:08:37 -07003626 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003627 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003628 if (force_on)
3629 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003630 printk(KERN_ERR "IOMMU: dmar init failed\n");
3631 put_iova_domain(&reserved_iova_list);
3632 iommu_exit_mempool();
3633 return ret;
3634 }
3635 printk(KERN_INFO
3636 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3637
mark gross5e0d2a62008-03-04 15:22:08 -08003638 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003639#ifdef CONFIG_SWIOTLB
3640 swiotlb = 0;
3641#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003642 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003643
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003644 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003645
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003646 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003647
Fenghua Yu99dcade2009-11-11 07:23:06 -08003648 bus_register_notifier(&pci_bus_type, &device_nb);
3649
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003650 return 0;
3651}
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003652
Han, Weidong3199aa62009-02-26 17:31:12 +08003653static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3654 struct pci_dev *pdev)
3655{
3656 struct pci_dev *tmp, *parent;
3657
3658 if (!iommu || !pdev)
3659 return;
3660
3661 /* dependent device detach */
3662 tmp = pci_find_upstream_pcie_bridge(pdev);
3663 /* Secondary interface's bus number and devfn 0 */
3664 if (tmp) {
3665 parent = pdev->bus->self;
3666 while (parent != tmp) {
3667 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003668 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003669 parent = parent->bus->self;
3670 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003671 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003672 iommu_detach_dev(iommu,
3673 tmp->subordinate->number, 0);
3674 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003675 iommu_detach_dev(iommu, tmp->bus->number,
3676 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003677 }
3678}
3679
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003680static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003681 struct pci_dev *pdev)
3682{
3683 struct device_domain_info *info;
3684 struct intel_iommu *iommu;
3685 unsigned long flags;
3686 int found = 0;
3687 struct list_head *entry, *tmp;
3688
David Woodhouse276dbf992009-04-04 01:45:37 +01003689 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3690 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003691 if (!iommu)
3692 return;
3693
3694 spin_lock_irqsave(&device_domain_lock, flags);
3695 list_for_each_safe(entry, tmp, &domain->devices) {
3696 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003697 if (info->segment == pci_domain_nr(pdev->bus) &&
3698 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003699 info->devfn == pdev->devfn) {
3700 list_del(&info->link);
3701 list_del(&info->global);
3702 if (info->dev)
3703 info->dev->dev.archdata.iommu = NULL;
3704 spin_unlock_irqrestore(&device_domain_lock, flags);
3705
Yu Zhao93a23a72009-05-18 13:51:37 +08003706 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003707 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003708 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003709 free_devinfo_mem(info);
3710
3711 spin_lock_irqsave(&device_domain_lock, flags);
3712
3713 if (found)
3714 break;
3715 else
3716 continue;
3717 }
3718
 3719		/* if there are no other devices under the same iommu
 3720		 * owned by this domain, clear this iommu in iommu_bmp,
 3721		 * and update the iommu count and coherency
3722 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003723 if (iommu == device_to_iommu(info->segment, info->bus,
3724 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003725 found = 1;
3726 }
3727
Roland Dreier3e7abe22011-07-20 06:22:21 -07003728 spin_unlock_irqrestore(&device_domain_lock, flags);
3729
Weidong Hanc7151a82008-12-08 22:51:37 +08003730 if (found == 0) {
3731 unsigned long tmp_flags;
3732 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3733 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3734 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003735 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003736 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003737
Alex Williamson9b4554b2011-05-24 12:19:04 -04003738 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3739 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3740 spin_lock_irqsave(&iommu->lock, tmp_flags);
3741 clear_bit(domain->id, iommu->domain_ids);
3742 iommu->domains[domain->id] = NULL;
3743 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3744 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003745 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003746}
3747
3748static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3749{
3750 struct device_domain_info *info;
3751 struct intel_iommu *iommu;
3752 unsigned long flags1, flags2;
3753
3754 spin_lock_irqsave(&device_domain_lock, flags1);
3755 while (!list_empty(&domain->devices)) {
3756 info = list_entry(domain->devices.next,
3757 struct device_domain_info, link);
3758 list_del(&info->link);
3759 list_del(&info->global);
3760 if (info->dev)
3761 info->dev->dev.archdata.iommu = NULL;
3762
3763 spin_unlock_irqrestore(&device_domain_lock, flags1);
3764
Yu Zhao93a23a72009-05-18 13:51:37 +08003765 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003766 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003767 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003768 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003769
3770 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003771 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003772 */
3773 spin_lock_irqsave(&domain->iommu_lock, flags2);
3774 if (test_and_clear_bit(iommu->seq_id,
3775 &domain->iommu_bmp)) {
3776 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003777 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003778 }
3779 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3780
3781 free_devinfo_mem(info);
3782 spin_lock_irqsave(&device_domain_lock, flags1);
3783 }
3784 spin_unlock_irqrestore(&device_domain_lock, flags1);
3785}
3786
Weidong Han5e98c4b2008-12-08 23:03:27 +08003787/* domain ids for virtual machines; they are never set in context entries */
3788static unsigned long vm_domid;
3789
3790static struct dmar_domain *iommu_alloc_vm_domain(void)
3791{
3792 struct dmar_domain *domain;
3793
3794 domain = alloc_domain_mem();
3795 if (!domain)
3796 return NULL;
3797
3798 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003799 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003800 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3801 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3802
3803 return domain;
3804}
3805
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003806static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003807{
3808 int adjust_width;
3809
3810 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003811 spin_lock_init(&domain->iommu_lock);
3812
3813 domain_reserve_special_ranges(domain);
3814
3815 /* calculate AGAW */
3816 domain->gaw = guest_width;
3817 adjust_width = guestwidth_to_adjustwidth(guest_width);
3818 domain->agaw = width_to_agaw(adjust_width);
3819
3820 INIT_LIST_HEAD(&domain->devices);
3821
3822 domain->iommu_count = 0;
3823 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003824 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003825 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003826 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003827 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003828
3829 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003830 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003831 if (!domain->pgd)
3832 return -ENOMEM;
3833 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3834 return 0;
3835}
3836
3837static void iommu_free_vm_domain(struct dmar_domain *domain)
3838{
3839 unsigned long flags;
3840 struct dmar_drhd_unit *drhd;
3841 struct intel_iommu *iommu;
3842 unsigned long i;
3843 unsigned long ndomains;
3844
3845 for_each_drhd_unit(drhd) {
3846 if (drhd->ignored)
3847 continue;
3848 iommu = drhd->iommu;
3849
3850 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003851 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003852 if (iommu->domains[i] == domain) {
3853 spin_lock_irqsave(&iommu->lock, flags);
3854 clear_bit(i, iommu->domain_ids);
3855 iommu->domains[i] = NULL;
3856 spin_unlock_irqrestore(&iommu->lock, flags);
3857 break;
3858 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003859 }
3860 }
3861}
3862
3863static void vm_domain_exit(struct dmar_domain *domain)
3864{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003865	/* Domain 0 is reserved, so don't process it */
3866 if (!domain)
3867 return;
3868
3869 vm_domain_remove_all_dev_info(domain);
3870 /* destroy iovas */
3871 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003872
3873 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003874 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003875
3876 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003877 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003878
3879 iommu_free_vm_domain(domain);
3880 free_domain_mem(domain);
3881}
3882
Joerg Roedel5d450802008-12-03 14:52:32 +01003883static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003884{
Joerg Roedel5d450802008-12-03 14:52:32 +01003885 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003886
Joerg Roedel5d450802008-12-03 14:52:32 +01003887 dmar_domain = iommu_alloc_vm_domain();
3888 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003889 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003890 "intel_iommu_domain_init: dmar_domain == NULL\n");
3891 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003892 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003893 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003894 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003895 "intel_iommu_domain_init() failed\n");
3896 vm_domain_exit(dmar_domain);
3897 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003898 }
Allen Kay8140a952011-10-14 12:32:17 -07003899 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003900 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003901
Joerg Roedel5d450802008-12-03 14:52:32 +01003902 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003903}
Kay, Allen M38717942008-09-09 18:37:29 +03003904
Joerg Roedel5d450802008-12-03 14:52:32 +01003905static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003906{
Joerg Roedel5d450802008-12-03 14:52:32 +01003907 struct dmar_domain *dmar_domain = domain->priv;
3908
3909 domain->priv = NULL;
3910 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003911}
Kay, Allen M38717942008-09-09 18:37:29 +03003912
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003913static int intel_iommu_attach_device(struct iommu_domain *domain,
3914 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003915{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003916 struct dmar_domain *dmar_domain = domain->priv;
3917 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003918 struct intel_iommu *iommu;
3919 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003920
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003921 /* normally pdev is not mapped */
3922 if (unlikely(domain_context_mapped(pdev))) {
3923 struct dmar_domain *old_domain;
3924
3925 old_domain = find_domain(pdev);
3926 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003927 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3928 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3929 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003930 else
3931 domain_remove_dev_info(old_domain);
3932 }
3933 }
3934
David Woodhouse276dbf992009-04-04 01:45:37 +01003935 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3936 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003937 if (!iommu)
3938 return -ENODEV;
3939
3940 /* check if this iommu agaw is sufficient for max mapped address */
3941 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003942 if (addr_width > cap_mgaw(iommu->cap))
3943 addr_width = cap_mgaw(iommu->cap);
3944
3945 if (dmar_domain->max_addr > (1LL << addr_width)) {
3946 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003947 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003948 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003949 return -EFAULT;
3950 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003951 dmar_domain->gaw = addr_width;
3952
3953 /*
3954 * Knock out extra levels of page tables if necessary
3955 */
3956 while (iommu->agaw < dmar_domain->agaw) {
3957 struct dma_pte *pte;
3958
3959 pte = dmar_domain->pgd;
3960 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08003961 dmar_domain->pgd = (struct dma_pte *)
3962 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01003963 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01003964 }
3965 dmar_domain->agaw--;
3966 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003967
David Woodhouse5fe60f42009-08-09 10:53:41 +01003968 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003969}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003970
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003971static void intel_iommu_detach_device(struct iommu_domain *domain,
3972 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003973{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003974 struct dmar_domain *dmar_domain = domain->priv;
3975 struct pci_dev *pdev = to_pci_dev(dev);
3976
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003977 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003978}
Kay, Allen M38717942008-09-09 18:37:29 +03003979
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003980static int intel_iommu_map(struct iommu_domain *domain,
3981 unsigned long iova, phys_addr_t hpa,
3982 int gfp_order, int iommu_prot)
Kay, Allen M38717942008-09-09 18:37:29 +03003983{
Joerg Roedeldde57a22008-12-03 15:04:09 +01003984 struct dmar_domain *dmar_domain = domain->priv;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003985 u64 max_addr;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003986 int prot = 0;
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003987 size_t size;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003988 int ret;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003989
Joerg Roedeldde57a22008-12-03 15:04:09 +01003990 if (iommu_prot & IOMMU_READ)
3991 prot |= DMA_PTE_READ;
3992 if (iommu_prot & IOMMU_WRITE)
3993 prot |= DMA_PTE_WRITE;
Sheng Yang9cf066972009-03-18 15:33:07 +08003994 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3995 prot |= DMA_PTE_SNP;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003996
Joerg Roedelb146a1c9f2010-01-20 17:17:37 +01003997 size = PAGE_SIZE << gfp_order;
David Woodhouse163cc522009-06-28 00:51:17 +01003998 max_addr = iova + size;
Joerg Roedeldde57a22008-12-03 15:04:09 +01003999 if (dmar_domain->max_addr < max_addr) {
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004000 u64 end;
4001
4002 /* check if minimum agaw is sufficient for mapped address */
Tom Lyon8954da12010-05-17 08:19:52 +01004003 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004004 if (end < max_addr) {
Tom Lyon8954da12010-05-17 08:19:52 +01004005 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004006 "sufficient for the mapped address (%llx)\n",
Tom Lyon8954da12010-05-17 08:19:52 +01004007 __func__, dmar_domain->gaw, max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004008 return -EFAULT;
4009 }
Joerg Roedeldde57a22008-12-03 15:04:09 +01004010 dmar_domain->max_addr = max_addr;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08004011 }
David Woodhousead051222009-06-28 14:22:28 +01004012 /* Round up size to next multiple of PAGE_SIZE, if it and
4013 the low bits of hpa would take us onto the next page */
David Woodhouse88cb6a72009-06-28 15:03:06 +01004014 size = aligned_nrpages(hpa, size);
David Woodhousead051222009-06-28 14:22:28 +01004015 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4016 hpa >> VTD_PAGE_SHIFT, size, prot);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004017 return ret;
Kay, Allen M38717942008-09-09 18:37:29 +03004018}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08004019
static int intel_iommu_unmap(struct iommu_domain *domain,
			     unsigned long iova, int gfp_order)
{
	struct dmar_domain *dmar_domain = domain->priv;
	size_t size = PAGE_SIZE << gfp_order;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

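	/* dma_pte_clear_range() reports the page order of the range it
	   actually cleared; hand that back so the generic IOMMU layer
	   knows how much address space was released. */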
	return order;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

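	/* Walk the page table down to the PTE backing this IOVA; phys
	   stays 0 if nothing is mapped there. */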
	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return intr_remapping_enabled;

	return 0;
}

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};
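
/*
 * Illustrative sketch only (not part of this driver): roughly how a
 * consumer such as KVM device assignment drives the ops above through
 * the generic IOMMU API of this era (signatures varied between
 * releases).  example_map_one_page() and its arguments are invented
 * names for illustration.
 */
#if 0
static int example_map_one_page(struct pci_dev *pdev, phys_addr_t paddr)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type);    /* -> intel_iommu_domain_init */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, &pdev->dev); /* -> intel_iommu_attach_device */
	if (ret)
		goto out_free;

	/* Map one page (gfp_order 0) read/write at IOVA 0 ... */
	ret = iommu_map(dom, 0, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(dom, 0, 0);		    /* ... and tear it down. */

	iommu_detach_device(dom, &pdev->dev);
out_free:
	iommu_domain_free(dom);			    /* -> intel_iommu_domain_destroy */
	return ret;
}
#endif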

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
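
/*
 * Decoding the field above: the graphics translation table size lives
 * in bits 11:8 of GGC, with bit 11 (GGC_MEMORY_VT_ENABLED) doubling as
 * the "shadow GTT for VT-d allocated" flag.  For example,
 * GGC_MEMORY_SIZE_2M_VT (0x9 << 8) means 2M of GTT plus a shadow GTT,
 * while GGC_MEMORY_SIZE_1M (0x1 << 8) has the VT bit clear, which the
 * quirk below treats as "no shadow GTT".
 */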

static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;
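	/* The masked field is read directly as the entry count, so a
	   value of 0x10 here means 16 TLB entries. */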

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? The BIOS is broken: warn loudly and fall
	   back to identity mapping for Azalia. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}