/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
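
/*
 * Worked example: with gaw == 48 and VTD_PAGE_SHIFT == 12,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1.  On a 32-bit kernel,
 * DOMAIN_MAX_PFN() clamps that to (unsigned long)-1, so PFN arithmetic
 * always stays within an unsigned long.
 */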

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}
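
/*
 * Worked example for the helpers above: the default 48-bit domain width
 * gives width_to_agaw(48) == 2, agaw_to_level(2) == 4 (a 4-level page
 * table), and agaw_to_width(2) == 48 round-trips.  Likewise,
 * lvl_to_nr_pages(2) == 512, i.e. one level-2 (2MiB) entry covers 512
 * 4KiB VT-d pages.
 */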

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
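
/*
 * On x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions above
 * are identity operations; the shifts only matter on configurations
 * whose MM page size is larger than the 4KiB VT-d page.
 */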

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched with TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root)?phys_to_virt(
		root->val & VTD_PAGE_MASK) :
		NULL);
}
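
/*
 * Lookup sketch: the root table has ROOT_ENTRY_NR (4096/16 == 256)
 * entries, one per PCI bus.  A present root entry points to a 4KiB
 * context table whose 256 entries are indexed by devfn, so resolving a
 * device is root_entry[bus] -> context_table[devfn] (see
 * device_to_context_entry() below).
 */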

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
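
/*
 * Typical programming sequence for a context entry (a sketch; the real
 * consumer is domain_context_mapping_one(), further down this file):
 *
 *	context_set_domain_id(context, id);
 *	context_set_address_width(context, iommu->agaw);
 *	context_set_address_root(context, virt_to_phys(pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */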

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
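
/*
 * Since each dma_pte is 8 bytes, a 4KiB page-table page holds 512 PTEs;
 * first_pte_in_page() is true exactly when a PTE pointer has crossed
 * onto a new page-table page, which the range loops below use to batch
 * one cache flush per page of PTEs.
 */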

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
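
/*
 * Example usage (kernel command line): "intel_iommu=on,strict,sp_off"
 * enables the IOMMU, disables batched IOTLB flushing, and disables
 * superpage support; options are comma-separated as parsed above.
 */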

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * get a supported smaller agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
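
/*
 * Worked example: if cap_sagaw() yields 0x4 (only bit 2 set, i.e. only
 * 4-level tables supported), iommu_calculate_agaw() starts at
 * width_to_agaw(48) == 2, finds bit 2 set, and returns 2.  If no
 * supported agaw at or below the requested width exists, it returns -1.
 */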

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask) {
			break;
		}
	}
	domain->iommu_superpage = fls(mask);
}
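
/*
 * Example: one iommu advertising 2MiB and 1GiB pages (cap bits 0b0011)
 * and another advertising only 2MiB (0b0001) intersect to 0b0001, so
 * fls(0b0001) == 1 limits the domain to 2MiB superpages.  If any active
 * iommu supports none, mask becomes 0 and fls(0) == 0 disables
 * superpages entirely (matching the iommu_superpage levels documented
 * in struct dmar_domain above).
 */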

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
		u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry) {
		goto out;
	}
	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}
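
/*
 * Walk sketch: for a 4-level table (level 4 down to 1), each iteration
 * peels off 9 bits of the pfn via pfn_level_offset() and descends,
 * allocating missing table pages on the way down.  target_level == 1
 * lands on the 4KiB leaf; target_level == 0 means "stop at whatever
 * leaf exists", which is how existing superpage mappings are found.
 */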


/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte, a tlb flush should be followed */
static int dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}
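
/*
 * The returned order is in 4KiB pages: clearing plain 4KiB leaves
 * (large_page == 1) returns order 0, while ending on a 2MiB superpage
 * leaf (large_page == 2) returns order 9 (512 pages), which callers can
 * feed into IOTLB invalidation sizing.
 */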

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably only needed to be extra-safe; it looks like we
	 * can ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}
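
/*
 * For DMA_TLB_PSI_FLUSH, "addr" must be naturally aligned to the flushed
 * size, and size_order encodes that size as log2 in 4KiB pages: e.g.
 * size_order 2 with a 16KiB-aligned addr invalidates four pages.  The
 * DMA_TLB_IAIG vs DMA_TLB_IIRG check above reports when hardware
 * performed a coarser flush than requested.
 */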

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support or
	 * the size is too big.
	 * PSI requires page size to be 2 ^ x, and the base address must be
	 * naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
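
/*
 * Example: pages == 3 rounds up to 4, so mask == 2 and a single PSI
 * covering four 4KiB pages is issued; if mask exceeded
 * cap_max_amask_val(), the code above would instead fall back to the
 * domain-selective flush.
 */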

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}


static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domainid 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001309static struct dmar_domain *alloc_domain(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001310{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001311 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001312
1313 domain = alloc_domain_mem();
1314 if (!domain)
1315 return NULL;
1316
Suresh Siddha4c923d42009-10-02 11:01:24 -07001317 domain->nid = -1;
Weidong Han8c11e792008-12-08 15:29:22 +08001318 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
Weidong Hand71a2f32008-12-07 21:13:41 +08001319 domain->flags = 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001320
1321 return domain;
1322}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	unsigned long flags;
	int num, ndomains;
	int found = 0;

	spin_lock_irqsave(&iommu->lock, flags);
	ndomains = cap_ndoms(iommu->cap);
	for_each_set_bit(num, iommu->domain_ids, ndomains) {
		if (iommu->domains[num] == domain) {
			found = 1;
			break;
		}
	}

	if (found) {
		clear_bit(num, iommu->domain_ids);
		clear_bit(iommu->seq_id, &domain->iommu_bmp);
		iommu->domains[num] = NULL;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
	struct pci_dev *pdev = NULL;
	struct iova *iova;
	int i;

	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
		&reserved_rbtree_key);

	/* IOAPIC ranges shouldn't be accessed by DMA */
	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
		IOVA_PFN(IOAPIC_RANGE_END));
	if (!iova) {
		printk(KERN_ERR "Reserve IOAPIC range failed\n");
		return -ENODEV;
	}

	/* Reserve all PCI MMIO to avoid peer-to-peer access */
	for_each_pci_dev(pdev) {
		struct resource *r;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			r = &pdev->resource[i];
			if (!r->flags || !(r->flags & IORESOURCE_MEM))
				continue;
			iova = reserve_iova(&reserved_iova_list,
					    IOVA_PFN(r->start),
					    IOVA_PFN(r->end));
			if (!iova) {
				printk(KERN_ERR "Reserve iova failed\n");
				return -ENODEV;
			}
		}
	}
	return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

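/*
 * VT-d page tables walk in 9-bit strides on top of a 12-bit page offset,
 * so a usable adjusted guest address width (AGAW) must have the form
 * 12 + 9*n.  The helper below rounds the requested guest width up to the
 * next such value, capped at 64.  Worked examples (illustrative, not from
 * the original source): gaw = 48 stays 48 (12 + 9*4); gaw = 36 rounds up
 * to 39 (12 + 9*3).
 */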
static inline int guestwidth_to_adjustwidth(int gaw)
{
	int agaw;
	int r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;
	if (agaw > 64)
		agaw = 64;
	return agaw;
}

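/*
 * Note (added for illustration): domain_init() below picks the smallest
 * adjusted guest address width the hardware actually supports.
 * cap_sagaw() is a bitmask of supported AGAWs; if the exact width is not
 * supported, find_next_bit() moves up to the next wider (deeper)
 * page-table layout.
 */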
static int domain_init(struct dmar_domain *domain, int guest_width)
{
	struct intel_iommu *iommu;
	int adjust_width, agaw;
	unsigned long sagaw;

	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
	spin_lock_init(&domain->iommu_lock);

	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	iommu = domain_get_iommu(domain);
	if (guest_width > cap_mgaw(iommu->cap))
		guest_width = cap_mgaw(iommu->cap);
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	agaw = width_to_agaw(adjust_width);
	sagaw = cap_sagaw(iommu->cap);
	if (!test_bit(agaw, &sagaw)) {
		/* hardware doesn't support it, choose a bigger one */
		pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
		agaw = find_next_bit(&sagaw, 5, agaw);
		if (agaw >= 5)
			return -ENODEV;
	}
	domain->agaw = agaw;
	INIT_LIST_HEAD(&domain->devices);

	if (ecap_coherent(iommu->ecap))
		domain->iommu_coherency = 1;
	else
		domain->iommu_coherency = 0;

	if (ecap_sc_support(iommu->ecap))
		domain->iommu_snooping = 1;
	else
		domain->iommu_snooping = 0;

	domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
	domain->iommu_count = 1;
	domain->nid = iommu->node;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
	return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* Domain 0 is reserved, so don't process it */
	if (!domain)
		return;

	/* Flush any lazy unmaps that may reference this domain */
	if (!intel_iommu_strict)
		flush_unmaps_timeout(0);

	domain_remove_dev_info(domain);
	/* destroy iovas */
	put_iova_domain(&domain->iovad);

	/* clear ptes */
	dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	/* free page tables */
	dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

	for_each_active_iommu(iommu, drhd)
		if (test_bit(iommu->seq_id, &domain->iommu_bmp))
			iommu_detach_domain(domain, iommu);

	free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
				      u8 bus, u8 devfn, int translation)
{
	struct context_entry *context;
	unsigned long flags;
	struct intel_iommu *iommu;
	struct dma_pte *pgd;
	unsigned long num;
	unsigned long ndomains;
	int id;
	int agaw;
	struct device_domain_info *info = NULL;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	BUG_ON(!domain->pgd);
	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
	       translation != CONTEXT_TT_MULTI_LEVEL);

	iommu = device_to_iommu(segment, bus, devfn);
	if (!iommu)
		return -ENODEV;

	context = device_to_context_entry(iommu, bus, devfn);
	if (!context)
		return -ENOMEM;
	spin_lock_irqsave(&iommu->lock, flags);
	if (context_present(context)) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}

	id = domain->id;
	pgd = domain->pgd;

	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
	    domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
		int found = 0;

		/* find an available domain id for this device in iommu */
		ndomains = cap_ndoms(iommu->cap);
		for_each_set_bit(num, iommu->domain_ids, ndomains) {
			if (iommu->domains[num] == domain) {
				id = num;
				found = 1;
				break;
			}
		}

		if (found == 0) {
			num = find_first_zero_bit(iommu->domain_ids, ndomains);
			if (num >= ndomains) {
				spin_unlock_irqrestore(&iommu->lock, flags);
				printk(KERN_ERR "IOMMU: no free domain ids\n");
				return -EFAULT;
			}

			set_bit(num, iommu->domain_ids);
			iommu->domains[num] = domain;
			id = num;
		}

		/* Skip the top levels of the page tables for an iommu
		 * which has a smaller agaw than the default.
		 * Unnecessary for PT mode.
		 */
		if (translation != CONTEXT_TT_PASS_THROUGH) {
			for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd)) {
					spin_unlock_irqrestore(&iommu->lock, flags);
					return -ENOMEM;
				}
			}
		}
	}

	context_set_domain_id(context, id);

	if (translation != CONTEXT_TT_PASS_THROUGH) {
		info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
		translation = info ? CONTEXT_TT_DEV_IOTLB :
				     CONTEXT_TT_MULTI_LEVEL;
	}
	/*
	 * In pass through mode, AW must be programmed to indicate the largest
	 * AGAW value supported by hardware. And ASR is ignored by hardware.
	 */
	if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
		context_set_address_width(context, iommu->msagaw);
	else {
		context_set_address_root(context, virt_to_phys(pgd));
		context_set_address_width(context, iommu->agaw);
	}

	context_set_translation_type(context, translation);
	context_set_fault_enable(context);
	context_set_present(context);
	domain_flush_cache(domain, context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}
	iommu_enable_dev_iotlb(info);
	spin_unlock_irqrestore(&iommu->lock, flags);

	spin_lock_irqsave(&domain->iommu_lock, flags);
	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
		domain->iommu_count++;
		if (domain->iommu_count == 1)
			domain->nid = iommu->node;
		domain_update_iommu_cap(domain);
	}
	spin_unlock_irqrestore(&domain->iommu_lock, flags);
	return 0;
}

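/*
 * Context entries are keyed by (bus, devfn), but a device behind a
 * PCIe-to-PCI bridge issues DMA with the bridge's source-id.  So besides
 * the device itself, every bridge on the upstream path needs a context
 * entry pointing at the same domain; the walk below sets those up.
 */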
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
		       int translation)
{
	int ret;
	struct pci_dev *tmp, *parent;

	ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
					 pdev->bus->number, pdev->devfn,
					 translation);
	if (ret)
		return ret;

	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return 0;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = domain_context_mapping_one(domain,
						 pci_domain_nr(parent->bus),
						 parent->bus->number,
						 parent->devfn, translation);
		if (ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->subordinate),
					tmp->subordinate->number, 0,
					translation);
	else /* this is a legacy PCI bridge */
		return domain_context_mapping_one(domain,
					pci_domain_nr(tmp->bus),
					tmp->bus->number,
					tmp->devfn,
					translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
	int ret;
	struct pci_dev *tmp, *parent;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
				pdev->devfn);
	if (!iommu)
		return -ENODEV;

	ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
	if (!ret)
		return ret;
	/* dependent device mapping */
	tmp = pci_find_upstream_pcie_bridge(pdev);
	if (!tmp)
		return ret;
	/* Secondary interface's bus number and devfn 0 */
	parent = pdev->bus->self;
	while (parent != tmp) {
		ret = device_context_mapped(iommu, parent->bus->number,
					    parent->devfn);
		if (!ret)
			return ret;
		parent = parent->bus->self;
	}
	if (pci_is_pcie(tmp))
		return device_context_mapped(iommu, tmp->subordinate->number,
					     0);
	else
		return device_context_mapped(iommu, tmp->bus->number,
					     tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
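/*
 * Worked example (illustrative, not from the original source): with 4KiB
 * pages, aligned_nrpages(0x1001, 0x2000) keeps only the in-page offset
 * (0x001), and PAGE_ALIGN(0x001 + 0x2000) >> 12 == 3 -- a two-page-sized
 * buffer starting one byte into a page straddles three pages.
 */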
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
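/*
 * Worked example (illustrative, not from the original source): with a
 * 9-bit stride, an IOVA and physical address both aligned to 2MiB (512
 * contiguous 4KiB pages) and a mapping of at least 512 pages yield
 * level 2, i.e. a 2MiB superpage, provided domain->iommu_superpage >= 1.
 */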
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
{
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		if (!pages)
			break;
		pfnmerge >>= VTD_STRIDE_SHIFT;
		level++;
		support--;
	}
	return level;
}

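/*
 * __domain_mapping() is the single worker behind both mapping paths: when
 * 'sg' is non-NULL it maps a scatterlist starting at iov_pfn, otherwise it
 * maps the physically contiguous range starting at phys_pfn.  nr_pages
 * counts VT-d (4KiB) pages in either case.
 */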
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			    struct scatterlist *sg, unsigned long phys_pfn,
			    unsigned long nr_pages, int prot)
{
	struct dma_pte *first_pte = NULL, *pte = NULL;
	phys_addr_t uninitialized_var(pteval);
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned long sg_res;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
		return -EINVAL;

	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

	if (sg)
		sg_res = 0;
	else {
		sg_res = nr_pages + 1;
		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
	}

	while (nr_pages > 0) {
		uint64_t tmp;

		if (!sg_res) {
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

		if (!pte) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

			first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
			if (!pte)
				return -ENOMEM;
			/* It is a large page */
			if (largepage_lvl > 1)
				pteval |= DMA_PTE_LARGE_PAGE;
			else
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;

		}
		/* We don't need a lock here; nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
		if (tmp) {
			static int dumps = 5;
			printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
			       iov_pfn, tmp, (unsigned long long)pteval);
			if (dumps) {
				dumps--;
				debug_dma_dump_mappings(NULL);
			}
			WARN_ON(1);
		}

		lvl_pages = lvl_to_nr_pages(largepage_lvl);

		BUG_ON(nr_pages < lvl_pages);
		BUG_ON(sg_res < lvl_pages);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;
		sg_res -= lvl_pages;

		/* If the next PTE would be the first in a new page, then we
		   need to flush the cache on the entries we've just written.
		   And then we'll need to recalculate 'pte', so clear it and
		   let it get set again in the if (!pte) block above.

		   If we're done (!nr_pages) we need to flush the cache too.

		   Also if we've been setting superpages, we may need to
		   recalculate 'pte' and switch back to smaller pages for the
		   end of the mapping, if the trailing size is not enough to
		   use another superpage (i.e. sg_res < lvl_pages). */
		pte++;
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && sg_res < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
			pte = NULL;
		}

		if (!sg_res && nr_pages)
			sg = sg_next(sg);
	}
	return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				    struct scatterlist *sg, unsigned long nr_pages,
				    int prot)
{
	return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
				     unsigned long phys_pfn, unsigned long nr_pages,
				     int prot)
{
	return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
				  struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu_disable_dev_iotlb(info);
		iommu = device_to_iommu(info->segment, info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
	struct dmar_domain *domain, *found = NULL;
	struct intel_iommu *iommu;
	struct dmar_drhd_unit *drhd;
	struct device_domain_info *info, *tmp;
	struct pci_dev *dev_tmp;
	unsigned long flags;
	int bus = 0, devfn = 0;
	int segment;
	int ret;

	domain = find_domain(pdev);
	if (domain)
		return domain;

	segment = pci_domain_nr(pdev->bus);

	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
	if (dev_tmp) {
		if (pci_is_pcie(dev_tmp)) {
			bus = dev_tmp->subordinate->number;
			devfn = 0;
		} else {
			bus = dev_tmp->bus->number;
			devfn = dev_tmp->devfn;
		}
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(info, &device_domain_list, global) {
			if (info->segment == segment &&
			    info->bus == bus && info->devfn == devfn) {
				found = info->domain;
				break;
			}
		}
		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* the pcie-pci bridge already has a domain, use it */
		if (found) {
			domain = found;
			goto found_domain;
		}
	}

	domain = alloc_domain();
	if (!domain)
		goto error;

	/* Allocate new domain for the device */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd) {
		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
		       pci_name(pdev));
		return NULL;
	}
	iommu = drhd->iommu;

	ret = iommu_attach_domain(domain, iommu);
	if (ret) {
		free_domain_mem(domain);
		goto error;
	}

	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		goto error;
	}

	/* register pcie-to-pci device */
	if (dev_tmp) {
		info = alloc_devinfo_mem();
		if (!info) {
			domain_exit(domain);
			goto error;
		}
		info->segment = segment;
		info->bus = bus;
		info->devfn = devfn;
		info->dev = NULL;
		info->domain = domain;
		/* This domain is shared by devices under p2p bridge */
		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

		/* the pcie-to-pci bridge already has a domain, use it */
		found = NULL;
		spin_lock_irqsave(&device_domain_lock, flags);
		list_for_each_entry(tmp, &device_domain_list, global) {
			if (tmp->segment == segment &&
			    tmp->bus == bus && tmp->devfn == devfn) {
				found = tmp->domain;
				break;
			}
		}
		if (found) {
			spin_unlock_irqrestore(&device_domain_lock, flags);
			free_devinfo_mem(info);
			domain_exit(domain);
			domain = found;
		} else {
			list_add(&info->link, &domain->devices);
			list_add(&info->global, &device_domain_list);
			spin_unlock_irqrestore(&device_domain_lock, flags);
		}
	}

found_domain:
	info = alloc_devinfo_mem();
	if (!info)
		goto error;
	info->segment = segment;
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;
	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody is fast */
	found = find_domain(pdev);
	if (found != NULL) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		if (found != domain) {
			domain_exit(domain);
			domain = found;
		}
		free_devinfo_mem(info);
		return domain;
	}
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
	return domain;
error:
	/* recheck it here, maybe others set it */
	return find_domain(pdev);
}

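/*
 * Identity-mapping policy flags: IDENTMAP_ALL puts every capable device
 * into the static 1:1 domain, while IDENTMAP_GFX and IDENTMAP_AZALIA
 * restrict that treatment to graphics devices and the (quirky) Azalia
 * audio device respectively.  The flags may be combined.
 */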
static int iommu_identity_mapping;
#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
		 start, end, domain->id);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
				      unsigned long long start,
				      unsigned long long end)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* For _hardware_ passthrough, don't bother. But for software
	   passthrough, we do it anyway -- it may indicate a memory
	   range which is reserved in E820, which therefore didn't get
	   set up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
		printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
		       pci_name(pdev), start, end);
		return 0;
	}

	printk(KERN_INFO
	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
	       pci_name(pdev), start, end);

	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	if (end >> agaw_to_width(domain->agaw)) {
		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     agaw_to_width(domain->agaw),
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		ret = -EIO;
		goto error;
	}

	ret = iommu_domain_identity_map(domain, start, end);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
	if (ret)
		goto error;

	return 0;

 error:
	domain_exit(domain);
	return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
					 struct pci_dev *pdev)
{
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return 0;
	return iommu_prepare_identity_map(pdev, rmrr->base_address,
					  rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
		       "floppy might not work\n");
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
				    unsigned long end_pfn, void *datax)
{
	int *ret = datax;

	*ret = iommu_domain_identity_map(si_domain,
					 (uint64_t)start_pfn << PAGE_SHIFT,
					 (uint64_t)end_pfn << PAGE_SHIFT);
	return *ret;
}

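/*
 * si_domain is the single static-identity (1:1) domain shared by all
 * devices granted pass-through-style DMA.  With hardware pass-through no
 * page tables are needed at all; with software pass-through every active
 * memory region is identity-mapped via si_domain_work_fn() above.
 */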
static int __init si_domain_init(int hw)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int nid, ret = 0;

	si_domain = alloc_domain();
	if (!si_domain)
		return -EFAULT;

	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

	for_each_active_iommu(iommu, drhd) {
		ret = iommu_attach_domain(si_domain, iommu);
		if (ret) {
			domain_exit(si_domain);
			return -EFAULT;
		}
	}

	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}

	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

	if (hw)
		return 0;

	for_each_online_node(nid) {
		work_with_active_regions(nid, si_domain_work_fn, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
				       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = pdev->dev.archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev,
			       int translation)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	info = alloc_devinfo_mem();
	if (!info)
		return -ENOMEM;

	ret = domain_context_mapping(domain, pdev, translation);
	if (ret) {
		free_devinfo_mem(info);
		return ret;
	}

	info->segment = pci_domain_nr(pdev->bus);
	info->bus = pdev->bus->number;
	info->devfn = pdev->devfn;
	info->dev = pdev;
	info->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&info->link, &domain->devices);
	list_add(&info->global, &device_domain_list);
	pdev->dev.archdata.iommu = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}

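/*
 * Note (added for illustration): 'startup' separates boot-time policy from
 * run-time rechecks.  At boot a device's usable DMA mask is not yet known,
 * so 64-bit capability is assumed; at run time a device whose mask cannot
 * cover all of memory (e.g. a 32-bit mask on a system with >4GiB of RAM)
 * is kept out of -- or evicted from -- the identity domain.
 */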
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
		return 1;

	if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
		return 1;

	if (!(iommu_identity_mapping & IDENTMAP_ALL))
		return 0;

	/*
	 * We want to start off with all devices in the 1:1 domain, and
	 * take them out later if we find they can't access all of memory.
	 *
	 * However, we can't do this for PCI devices behind bridges,
	 * because all PCI devices behind the same bridge will end up
	 * with the same source-id on their transactions.
	 *
	 * Practically speaking, we can't change things around for these
	 * devices at run-time, because we can't be sure there'll be no
	 * DMA transactions in flight for any of their siblings.
	 *
	 * So PCI devices (unless they're on the root bus) as well as
	 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
	 * the 1:1 domain, just in _case_ one of their siblings turns out
	 * not to be able to map all of memory.
	 */
	if (!pci_is_pcie(pdev)) {
		if (!pci_is_root_bus(pdev->bus))
			return 0;
		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
			return 0;
	} else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
		return 0;

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will -- if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = pdev->dma_mask;

		if (pdev->dev.coherent_dma_mask &&
		    pdev->dev.coherent_dma_mask < dma_mask)
			dma_mask = pdev->dev.coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(&pdev->dev);
	}

	return 1;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
	struct pci_dev *pdev = NULL;
	int ret;

	ret = si_domain_init(hw);
	if (ret)
		return -EFAULT;

	for_each_pci_dev(pdev) {
		/* Skip Host/PCI Bridge devices */
		if (IS_BRIDGE_HOST_DEVICE(pdev))
			continue;
		if (iommu_should_identity_map(pdev, 1)) {
			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
			       hw ? "hardware" : "software", pci_name(pdev));

			ret = domain_add_dev_info(si_domain, pdev,
						  hw ? CONTEXT_TT_PASS_THROUGH :
						       CONTEXT_TT_MULTI_LEVEL);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * lock not needed as this is only incremented in the single
		 * threaded kernel __init code path; all other accesses are
		 * read-only
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			   GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD:
		 * we could share the same root & context tables
		 * among all IOMMUs; need to split it later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
		if (!ecap_pass_through(iommu->ecap))
			hw_pass_through = 0;
	}

	/*
	 * Start from the sane iommu hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized by us
		 * (for example, while enabling interrupt-remapping) then
		 * we got the things already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued Invalidate not enabled, use Register Based
			 * Invalidate
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
			       "invalidation\n",
			       iommu->seq_id,
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	if (iommu_pass_through)
		iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
	iommu_identity_mapping |= IDENTMAP_GFX;
#endif

	check_tylersburg_isoch();

	/*
	 * If pass through is not set or not enabled, set up context entries
	 * for identity mappings for rmrr, gfx, and isa; this may fall back
	 * to static identity mapping if iommu_identity_mapping is set.
	 */
	if (iommu_identity_mapping) {
		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
		if (ret) {
			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
			goto error;
		}
	}
	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/*
			 * some BIOSes list non-existent devices in the
			 * DMAR table.
			 */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				       "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored) {
			/*
			 * we always have to disable PMRs or DMA may fail on
			 * this device
			 */
			if (force_on)
				iommu_disable_protect_mem_regions(drhd->iommu);
			continue;
		}
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;

		iommu_disable_protect_mem_regions(iommu);
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
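/*
 * Note (added for illustration): on configurations where the MM page is
 * larger than the 4KiB VT-d page, one MM page spans several VT-d pages;
 * allocating IOVA space in MM-page units keeps it consistent with the CPU
 * page allocator (see the dma_to_mm_pfn() conversions at the call sites).
 */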
static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address within
		 * DMA_BIT_MASK(32), and if that fails then try allocating
		 * from a higher range
		 */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
		       nrpages, pci_name(pdev));
		return NULL;
	}

	return iova;
}

static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
				    DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
		       "Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev,
					     CONTEXT_TT_MULTI_LEVEL);
		if (ret) {
			printk(KERN_ERR
			       "Domain context map for %s failed\n",
			       pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}

static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->dev.archdata.iommu;
	if (likely(info))
		return info->domain;

	return __get_valid_domain_for_dev(dev);
}

static int iommu_dummy(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

/* Check if the pdev needs to go through non-identity map and unmap process. */
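/*
 * A return value of 1 means "no IOMMU mapping needed": the caller may use
 * the physical address directly, either because the device sits in the
 * identity domain or because it bypasses the IOMMU entirely.
 */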
David Woodhouse73676832009-07-04 14:08:36 +01002676static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002677{
David Woodhouse73676832009-07-04 14:08:36 +01002678 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002679 int found;
2680
David Woodhouse73676832009-07-04 14:08:36 +01002681 if (unlikely(dev->bus != &pci_bus_type))
2682 return 1;
2683
2684 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002685 if (iommu_dummy(pdev))
2686 return 1;
2687
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002688 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002689 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002690
2691 found = identity_mapping(pdev);
2692 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002693 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002694 return 1;
2695 else {
2696 /*
2697 * A 32-bit DMA device is removed from si_domain and falls
2698 * back to non-identity mapping.
2699 */
2700 domain_remove_one_dev_info(si_domain, pdev);
2701 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2702 pci_name(pdev));
2703 return 0;
2704 }
2705 } else {
2706 /*
2707 * A 64-bit DMA device detached from a VM is put back into
2708 * si_domain for identity mapping.
2709 */
David Woodhouse6941af22009-07-04 18:24:27 +01002710 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002711 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002712 ret = domain_add_dev_info(si_domain, pdev,
2713 hw_pass_through ?
2714 CONTEXT_TT_PASS_THROUGH :
2715 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002716 if (!ret) {
2717 printk(KERN_INFO "64bit %s uses identity mapping\n",
2718 pci_name(pdev));
2719 return 1;
2720 }
2721 }
2722 }
2723
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002724 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002725}
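
/*
 * Illustrative sketch, not part of this driver: the demotion above is
 * driven by the device's DMA mask, so a driver that narrows its mask
 * leaves si_domain on its next mapping request ("my_pdev" is a
 * hypothetical variable; compiled out):
 */
#if 0
	/* Subsequent dma_map_*() calls on my_pdev take the non-identity
	 * path through iommu_no_mapping() above. */
	pci_set_dma_mask(my_pdev, DMA_BIT_MASK(32));
#endif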
2726
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002727static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2728 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002729{
2730 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002731 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002732 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002733 struct iova *iova;
2734 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002735 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002736 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002737 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002738
2739 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002740
David Woodhouse73676832009-07-04 14:08:36 +01002741 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002742 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002743
2744 domain = get_valid_domain_for_dev(pdev);
2745 if (!domain)
2746 return 0;
2747
Weidong Han8c11e792008-12-08 15:29:22 +08002748 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002749 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002750
Mike Travisc681d0b2011-05-28 13:15:05 -05002751 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002752 if (!iova)
2753 goto error;
2754
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002755 /*
2756 * Check if DMAR supports zero-length reads on write-only
2757 * mappings.
2758 */
2759 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08002760 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002761 prot |= DMA_PTE_READ;
2762 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2763 prot |= DMA_PTE_WRITE;
2764 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002765 * paddr .. (paddr + size) might cover only part of a page; we
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002766 * should map the whole page.  Note: if two parts of one page are
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002767 * mapped separately, two guest addresses might map to the same
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002768 * host paddr, but this is not a big problem.
2769 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002770 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002771 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002772 if (ret)
2773 goto error;
2774
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002775 /* it's a non-present to present mapping. Only flush if caching mode */
2776 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002777 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002778 else
Weidong Han8c11e792008-12-08 15:29:22 +08002779 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002780
David Woodhouse03d6a242009-06-28 15:33:46 +01002781 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2782 start_paddr += paddr & ~PAGE_MASK;
2783 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002784
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002785error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002786 if (iova)
2787 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002788 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002789 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002790 return 0;
2791}
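
/*
 * Illustrative sketch, not part of this driver: a typical streaming
 * mapping as seen from a device driver.  dma_map_single() dispatches
 * through intel_dma_ops to intel_map_page() below and hence to
 * __intel_map_single() above ("dev", "buf" and "len" are hypothetical;
 * compiled out):
 */
#if 0
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hand "handle" to the hardware, wait for it to finish ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
#endif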
2792
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002793static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2794 unsigned long offset, size_t size,
2795 enum dma_data_direction dir,
2796 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002797{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002798 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2799 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002800}
2801
mark gross5e0d2a62008-03-04 15:22:08 -08002802static void flush_unmaps(void)
2803{
mark gross80b20dd2008-04-18 13:53:58 -07002804 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002805
mark gross5e0d2a62008-03-04 15:22:08 -08002806 timer_on = 0;
2807
2808 /* just flush them all */
2809 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002810 struct intel_iommu *iommu = g_iommus[i];
2811 if (!iommu)
2812 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002813
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002814 if (!deferred_flush[i].next)
2815 continue;
2816
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002817 /* In caching mode, global flushes make emulation expensive */
2818 if (!cap_caching_mode(iommu->cap))
2819 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002820 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002821 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002822 unsigned long mask;
2823 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002824 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002825
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002826 /* On real hardware multiple invalidations are expensive */
2827 if (cap_caching_mode(iommu->cap))
2828 iommu_flush_iotlb_psi(iommu, domain->id,
2829 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2830 else {
2831 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2832 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2833 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2834 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002835 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002836 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002837 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002838 }
2839
mark gross5e0d2a62008-03-04 15:22:08 -08002840 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002841}
2842
2843static void flush_unmaps_timeout(unsigned long data)
2844{
mark gross80b20dd2008-04-18 13:53:58 -07002845 unsigned long flags;
2846
2847 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002848 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002849 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002850}
2851
2852static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2853{
2854 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002855 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002856 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002857
2858 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002859 if (list_size == HIGH_WATER_MARK)
2860 flush_unmaps();
2861
Weidong Han8c11e792008-12-08 15:29:22 +08002862 iommu = domain_get_iommu(dom);
2863 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002864
mark gross80b20dd2008-04-18 13:53:58 -07002865 next = deferred_flush[iommu_id].next;
2866 deferred_flush[iommu_id].domain[next] = dom;
2867 deferred_flush[iommu_id].iova[next] = iova;
2868 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002869
2870 if (!timer_on) {
2871 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2872 timer_on = 1;
2873 }
2874 list_size++;
2875 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2876}
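
/*
 * The deferred_flush[] tables filled in above are drained either when
 * HIGH_WATER_MARK entries accumulate or when the 10ms unmap_timer
 * fires.  Batching trades a short window, in which a misbehaving
 * device could still reach just-freed IOVAs, for far fewer IOTLB
 * flushes.  To opt out, boot with:
 *
 *	intel_iommu=strict
 *
 * which makes the unmap paths below flush and free synchronously
 * instead of calling add_unmap().
 */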
2877
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002878static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2879 size_t size, enum dma_data_direction dir,
2880 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002881{
2882 struct pci_dev *pdev = to_pci_dev(dev);
2883 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002884 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002885 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002886 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002887
David Woodhouse73676832009-07-04 14:08:36 +01002888 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002889 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002890
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002891 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002892 BUG_ON(!domain);
2893
Weidong Han8c11e792008-12-08 15:29:22 +08002894 iommu = domain_get_iommu(domain);
2895
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002896 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002897 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2898 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002899 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002900
David Woodhoused794dc92009-06-28 00:27:49 +01002901 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2902 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002903
David Woodhoused794dc92009-06-28 00:27:49 +01002904 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2905 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002906
2907 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002908 dma_pte_clear_range(domain, start_pfn, last_pfn);
2909
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002910 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002911 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2912
mark gross5e0d2a62008-03-04 15:22:08 -08002913 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002914 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002915 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002916 /* free iova */
2917 __free_iova(&domain->iovad, iova);
2918 } else {
2919 add_unmap(domain, iova);
2920 /*
2921 * queue up the release of the unmap to save roughly 1/6th of
2922 * the CPU time consumed by the iotlb flush operation...
2923 */
mark gross5e0d2a62008-03-04 15:22:08 -08002924 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002925}
2926
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002927static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2928 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002929{
2930 void *vaddr;
2931 int order;
2932
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002933 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002934 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002935
2936 if (!iommu_no_mapping(hwdev))
2937 flags &= ~(GFP_DMA | GFP_DMA32);
2938 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2939 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2940 flags |= GFP_DMA;
2941 else
2942 flags |= GFP_DMA32;
2943 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002944
2945 vaddr = (void *)__get_free_pages(flags, order);
2946 if (!vaddr)
2947 return NULL;
2948 memset(vaddr, 0, size);
2949
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002950 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2951 DMA_BIDIRECTIONAL,
2952 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002953 if (*dma_handle)
2954 return vaddr;
2955 free_pages((unsigned long)vaddr, order);
2956 return NULL;
2957}
2958
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002959static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2960 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002961{
2962 int order;
2963
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002964 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002965 order = get_order(size);
2966
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002967 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002968 free_pages((unsigned long)vaddr, order);
2969}
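
/*
 * Illustrative sketch, not part of this driver: the coherent API as a
 * driver uses it.  dma_alloc_coherent() lands in intel_alloc_coherent()
 * above, which allocates the pages and maps them bidirectionally in one
 * step ("dev", "ring" and "ring_dma" are hypothetical; compiled out):
 */
#if 0
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* ... device uses ring_dma, CPU uses ring, no sync needed ... */
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
#endif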
2970
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002971static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2972 int nelems, enum dma_data_direction dir,
2973 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002974{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002975 struct pci_dev *pdev = to_pci_dev(hwdev);
2976 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002977 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002978 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002979 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002980
David Woodhouse73676832009-07-04 14:08:36 +01002981 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002982 return;
2983
2984 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08002985 BUG_ON(!domain);
2986
2987 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002988
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07002989 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01002990 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2991 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002992 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002993
David Woodhoused794dc92009-06-28 00:27:49 +01002994 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2995 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002996
2997 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002998 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002999
David Woodhoused794dc92009-06-28 00:27:49 +01003000 /* free page tables */
3001 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3002
David Woodhouseacea0012009-07-14 01:55:11 +01003003 if (intel_iommu_strict) {
3004 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003005 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003006 /* free iova */
3007 __free_iova(&domain->iovad, iova);
3008 } else {
3009 add_unmap(domain, iova);
3010 /*
3011 * queue up the release of the unmap to save roughly 1/6th of
3012 * the CPU time consumed by the iotlb flush operation...
3013 */
3014 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003015}
3016
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003017static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003018 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003019{
3020 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003021 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003022
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003023 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003024 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003025 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003026 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003027 }
3028 return nelems;
3029}
3030
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003031static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3032 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003033{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003034 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003035 struct pci_dev *pdev = to_pci_dev(hwdev);
3036 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003037 size_t size = 0;
3038 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003039 struct iova *iova = NULL;
3040 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003041 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003042 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003043 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003044
3045 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003046 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003047 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003048
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003049 domain = get_valid_domain_for_dev(pdev);
3050 if (!domain)
3051 return 0;
3052
Weidong Han8c11e792008-12-08 15:29:22 +08003053 iommu = domain_get_iommu(domain);
3054
David Woodhouseb536d242009-06-28 14:49:31 +01003055 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003056 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003057
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003058 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3059 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003060 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003061 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003062 return 0;
3063 }
3064
3065 /*
3066 * Check if DMAR supports zero-length reads on write-only
3067 * mappings.
3068 */
3069 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
Weidong Han8c11e792008-12-08 15:29:22 +08003070 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003071 prot |= DMA_PTE_READ;
3072 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3073 prot |= DMA_PTE_WRITE;
3074
David Woodhouseb536d242009-06-28 14:49:31 +01003075 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003076
Fenghua Yuf5329592009-08-04 15:09:37 -07003077 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003078 if (unlikely(ret)) {
3079 /* clear the page */
3080 dma_pte_clear_range(domain, start_vpfn,
3081 start_vpfn + size - 1);
3082 /* free page tables */
3083 dma_pte_free_pagetable(domain, start_vpfn,
3084 start_vpfn + size - 1);
3085 /* free iova */
3086 __free_iova(&domain->iovad, iova);
3087 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003088 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003089
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003090 /* it's a non-present to present mapping. Only flush if caching mode */
3091 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003092 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003093 else
Weidong Han8c11e792008-12-08 15:29:22 +08003094 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003095
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003096 return nelems;
3097}
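
/*
 * Illustrative sketch, not part of this driver: scatter-gather mapping
 * from the driver side.  dma_map_sg() reaches intel_map_sg() above,
 * which carves one IOVA range covering every entry ("dev", "sgl",
 * "nents" and program_hw_descriptor() are hypothetical; compiled out):
 */
#if 0
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;
	for_each_sg(sgl, sg, mapped, i)
		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
#endif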
3098
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003099static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3100{
3101 return !dma_addr;
3102}
3103
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003104struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003105 .alloc_coherent = intel_alloc_coherent,
3106 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003107 .map_sg = intel_map_sg,
3108 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003109 .map_page = intel_map_page,
3110 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003111 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003112};
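
/*
 * The generic DMA API dispatches through this table: on x86,
 * dma_map_single() and friends look up get_dma_ops(dev) - pointed at
 * &intel_dma_ops by intel_iommu_init() below - and invoke the matching
 * method.  Roughly (a sketch of the generic inline, not code from this
 * file; compiled out):
 */
#if 0
	struct dma_map_ops *ops = get_dma_ops(dev);

	addr = ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
#endif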
3113
3114static inline int iommu_domain_cache_init(void)
3115{
3116 int ret = 0;
3117
3118 iommu_domain_cache = kmem_cache_create("iommu_domain",
3119 sizeof(struct dmar_domain),
3120 0,
3121 SLAB_HWCACHE_ALIGN,
3123 NULL);
3124 if (!iommu_domain_cache) {
3125 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3126 ret = -ENOMEM;
3127 }
3128
3129 return ret;
3130}
3131
3132static inline int iommu_devinfo_cache_init(void)
3133{
3134 int ret = 0;
3135
3136 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3137 sizeof(struct device_domain_info),
3138 0,
3139 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003140 NULL);
3141 if (!iommu_devinfo_cache) {
3142 printk(KERN_ERR "Couldn't create devinfo cache\n");
3143 ret = -ENOMEM;
3144 }
3145
3146 return ret;
3147}
3148
3149static inline int iommu_iova_cache_init(void)
3150{
3151 int ret = 0;
3152
3153 iommu_iova_cache = kmem_cache_create("iommu_iova",
3154 sizeof(struct iova),
3155 0,
3156 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003157 NULL);
3158 if (!iommu_iova_cache) {
3159 printk(KERN_ERR "Couldn't create iova cache\n");
3160 ret = -ENOMEM;
3161 }
3162
3163 return ret;
3164}
3165
3166static int __init iommu_init_mempool(void)
3167{
3168 int ret;
3169 ret = iommu_iova_cache_init();
3170 if (ret)
3171 return ret;
3172
3173 ret = iommu_domain_cache_init();
3174 if (ret)
3175 goto domain_error;
3176
3177 ret = iommu_devinfo_cache_init();
3178 if (!ret)
3179 return ret;
3180
3181 kmem_cache_destroy(iommu_domain_cache);
3182domain_error:
3183 kmem_cache_destroy(iommu_iova_cache);
3184
3185 return -ENOMEM;
3186}
3187
3188static void __init iommu_exit_mempool(void)
3189{
3190 kmem_cache_destroy(iommu_devinfo_cache);
3191 kmem_cache_destroy(iommu_domain_cache);
3192 kmem_cache_destroy(iommu_iova_cache);
3193
3194}
3195
Dan Williams556ab452010-07-23 15:47:56 -07003196static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3197{
3198 struct dmar_drhd_unit *drhd;
3199 u32 vtbar;
3200 int rc;
3201
3202 /* We know that this device on this chipset has its own IOMMU.
3203 * If we find it under a different IOMMU, then the BIOS is lying
3204 * to us. Hope that the IOMMU for this device is actually
3205 * disabled, and it needs no translation...
3206 */
3207 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3208 if (rc) {
3209 /* "can't" happen */
3210 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3211 return;
3212 }
3213 vtbar &= 0xffff0000;
3214
3215 /* we know that this iommu should be at offset 0xa000 from vtbar */
3216 drhd = dmar_find_matched_drhd_unit(pdev);
3217 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3218 TAINT_FIRMWARE_WORKAROUND,
3219 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3220 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3221}
3222DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
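
/*
 * DECLARE_PCI_FIXUP_ENABLE() above arranges for the quirk to run every
 * time a matching device is enabled.  A minimal fixup has the same
 * shape ("quirk_example" and the device ID 0x1234 are hypothetical;
 * compiled out):
 */
#if 0
static void quirk_example(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "example quirk ran\n");
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);
#endif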
3223
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003224static void __init init_no_remapping_devices(void)
3225{
3226 struct dmar_drhd_unit *drhd;
3227
3228 for_each_drhd_unit(drhd) {
3229 if (!drhd->include_all) {
3230 int i;
3231 for (i = 0; i < drhd->devices_cnt; i++)
3232 if (drhd->devices[i] != NULL)
3233 break;
3234 /* ignore DMAR unit if no pci devices exist */
3235 if (i == drhd->devices_cnt)
3236 drhd->ignored = 1;
3237 }
3238 }
3239
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003240 for_each_drhd_unit(drhd) {
3241 int i;
3242 if (drhd->ignored || drhd->include_all)
3243 continue;
3244
3245 for (i = 0; i < drhd->devices_cnt; i++)
3246 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003247 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003248 break;
3249
3250 if (i < drhd->devices_cnt)
3251 continue;
3252
David Woodhousec0771df2011-10-14 20:59:46 +01003253 /* This IOMMU has *only* gfx devices. Either bypass it or
3254 set the gfx_mapped flag, as appropriate */
3255 if (dmar_map_gfx) {
3256 intel_iommu_gfx_mapped = 1;
3257 } else {
3258 drhd->ignored = 1;
3259 for (i = 0; i < drhd->devices_cnt; i++) {
3260 if (!drhd->devices[i])
3261 continue;
3262 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3263 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003264 }
3265 }
3266}
3267
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003268#ifdef CONFIG_SUSPEND
3269static int init_iommu_hw(void)
3270{
3271 struct dmar_drhd_unit *drhd;
3272 struct intel_iommu *iommu = NULL;
3273
3274 for_each_active_iommu(iommu, drhd)
3275 if (iommu->qi)
3276 dmar_reenable_qi(iommu);
3277
Joseph Cihulab7792602011-05-03 00:08:37 -07003278 for_each_iommu(iommu, drhd) {
3279 if (drhd->ignored) {
3280 /*
3281 * we always have to disable PMRs or DMA may fail on
3282 * this device
3283 */
3284 if (force_on)
3285 iommu_disable_protect_mem_regions(iommu);
3286 continue;
3287 }
3288
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003289 iommu_flush_write_buffer(iommu);
3290
3291 iommu_set_root_entry(iommu);
3292
3293 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003294 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003295 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003296 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003297 if (iommu_enable_translation(iommu))
3298 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003299 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003300 }
3301
3302 return 0;
3303}
3304
3305static void iommu_flush_all(void)
3306{
3307 struct dmar_drhd_unit *drhd;
3308 struct intel_iommu *iommu;
3309
3310 for_each_active_iommu(iommu, drhd) {
3311 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003312 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003313 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003314 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003315 }
3316}
3317
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003318static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003319{
3320 struct dmar_drhd_unit *drhd;
3321 struct intel_iommu *iommu = NULL;
3322 unsigned long flag;
3323
3324 for_each_active_iommu(iommu, drhd) {
3325 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3326 GFP_ATOMIC);
3327 if (!iommu->iommu_state)
3328 goto nomem;
3329 }
3330
3331 iommu_flush_all();
3332
3333 for_each_active_iommu(iommu, drhd) {
3334 iommu_disable_translation(iommu);
3335
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003336 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003337
3338 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3339 readl(iommu->reg + DMAR_FECTL_REG);
3340 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3341 readl(iommu->reg + DMAR_FEDATA_REG);
3342 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3343 readl(iommu->reg + DMAR_FEADDR_REG);
3344 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3345 readl(iommu->reg + DMAR_FEUADDR_REG);
3346
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003347 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003348 }
3349 return 0;
3350
3351nomem:
3352 for_each_active_iommu(iommu, drhd)
3353 kfree(iommu->iommu_state);
3354
3355 return -ENOMEM;
3356}
3357
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003358static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003359{
3360 struct dmar_drhd_unit *drhd;
3361 struct intel_iommu *iommu = NULL;
3362 unsigned long flag;
3363
3364 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003365 if (force_on)
3366 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3367 else
3368 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003369 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003370 }
3371
3372 for_each_active_iommu(iommu, drhd) {
3373
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003374 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003375
3376 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3377 iommu->reg + DMAR_FECTL_REG);
3378 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3379 iommu->reg + DMAR_FEDATA_REG);
3380 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3381 iommu->reg + DMAR_FEADDR_REG);
3382 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3383 iommu->reg + DMAR_FEUADDR_REG);
3384
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003385 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003386 }
3387
3388 for_each_active_iommu(iommu, drhd)
3389 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003390}
3391
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003392static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003393 .resume = iommu_resume,
3394 .suspend = iommu_suspend,
3395};
3396
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003397static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003398{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003399 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003400}
3401
3402#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003403static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003404#endif /* CONFIG_PM */
3405
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003406LIST_HEAD(dmar_rmrr_units);
3407
3408static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3409{
3410 list_add(&rmrr->list, &dmar_rmrr_units);
3411}
3412
3413
3414int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3415{
3416 struct acpi_dmar_reserved_memory *rmrr;
3417 struct dmar_rmrr_unit *rmrru;
3418
3419 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3420 if (!rmrru)
3421 return -ENOMEM;
3422
3423 rmrru->hdr = header;
3424 rmrr = (struct acpi_dmar_reserved_memory *)header;
3425 rmrru->base_address = rmrr->base_address;
3426 rmrru->end_address = rmrr->end_address;
3427
3428 dmar_register_rmrr_unit(rmrru);
3429 return 0;
3430}
3431
3432static int __init
3433rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3434{
3435 struct acpi_dmar_reserved_memory *rmrr;
3436 int ret;
3437
3438 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3439 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3440 ((void *)rmrr) + rmrr->header.length,
3441 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3442
3443 if (ret || (rmrru->devices_cnt == 0)) {
3444 list_del(&rmrru->list);
3445 kfree(rmrru);
3446 }
3447 return ret;
3448}
3449
3450static LIST_HEAD(dmar_atsr_units);
3451
3452int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3453{
3454 struct acpi_dmar_atsr *atsr;
3455 struct dmar_atsr_unit *atsru;
3456
3457 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3458 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3459 if (!atsru)
3460 return -ENOMEM;
3461
3462 atsru->hdr = hdr;
3463 atsru->include_all = atsr->flags & 0x1;
3464
3465 list_add(&atsru->list, &dmar_atsr_units);
3466
3467 return 0;
3468}
3469
3470static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3471{
3472 int rc;
3473 struct acpi_dmar_atsr *atsr;
3474
3475 if (atsru->include_all)
3476 return 0;
3477
3478 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3479 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3480 (void *)atsr + atsr->header.length,
3481 &atsru->devices_cnt, &atsru->devices,
3482 atsr->segment);
3483 if (rc || !atsru->devices_cnt) {
3484 list_del(&atsru->list);
3485 kfree(atsru);
3486 }
3487
3488 return rc;
3489}
3490
3491int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3492{
3493 int i;
3494 struct pci_bus *bus;
3495 struct acpi_dmar_atsr *atsr;
3496 struct dmar_atsr_unit *atsru;
3497
3498 dev = pci_physfn(dev);
3499
3500 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3501 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3502 if (atsr->segment == pci_domain_nr(dev->bus))
3503 goto found;
3504 }
3505
3506 return 0;
3507
3508found:
3509 for (bus = dev->bus; bus; bus = bus->parent) {
3510 struct pci_dev *bridge = bus->self;
3511
3512 if (!bridge || !pci_is_pcie(bridge) ||
3513 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3514 return 0;
3515
3516 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3517 for (i = 0; i < atsru->devices_cnt; i++)
3518 if (atsru->devices[i] == bridge)
3519 return 1;
3520 break;
3521 }
3522 }
3523
3524 if (atsru->include_all)
3525 return 1;
3526
3527 return 0;
3528}
3529
3530int dmar_parse_rmrr_atsr_dev(void)
3531{
3532 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3533 struct dmar_atsr_unit *atsr, *atsr_n;
3534 int ret = 0;
3535
3536 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3537 ret = rmrr_parse_dev(rmrr);
3538 if (ret)
3539 return ret;
3540 }
3541
3542 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3543 ret = atsr_parse_dev(atsr);
3544 if (ret)
3545 return ret;
3546 }
3547
3548 return ret;
3549}
3550
Fenghua Yu99dcade2009-11-11 07:23:06 -08003551/*
3552 * Here we only respond to a device being unbound from its driver.
3553 *
3554 * A newly added device is not attached to its DMAR domain here yet.
3555 * That happens when the device is first mapped to an iova.
3556 */
3557static int device_notifier(struct notifier_block *nb,
3558 unsigned long action, void *data)
3559{
3560 struct device *dev = data;
3561 struct pci_dev *pdev = to_pci_dev(dev);
3562 struct dmar_domain *domain;
3563
David Woodhouse44cd6132009-12-02 10:18:30 +00003564 if (iommu_no_mapping(dev))
3565 return 0;
3566
Fenghua Yu99dcade2009-11-11 07:23:06 -08003567 domain = find_domain(pdev);
3568 if (!domain)
3569 return 0;
3570
Alex Williamsona97590e2011-03-04 14:52:16 -07003571 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003572 domain_remove_one_dev_info(domain, pdev);
3573
Alex Williamsona97590e2011-03-04 14:52:16 -07003574 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3575 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3576 list_empty(&domain->devices))
3577 domain_exit(domain);
3578 }
3579
Fenghua Yu99dcade2009-11-11 07:23:06 -08003580 return 0;
3581}
3582
3583static struct notifier_block device_nb = {
3584 .notifier_call = device_notifier,
3585};
3586
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003587int __init intel_iommu_init(void)
3588{
3589 int ret = 0;
3590
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003591 /* VT-d is required for a TXT/tboot launch, so enforce that */
3592 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003593
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003594 if (dmar_table_init()) {
3595 if (force_on)
3596 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003597 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003598 }
3599
Suresh Siddhac2c72862011-08-23 17:05:19 -07003600 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003601 if (force_on)
3602 panic("tboot: Failed to initialize DMAR device scope\n");
3603 return -ENODEV;
3604 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003605
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003606 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003607 return -ENODEV;
3608
Joseph Cihula51a63e62011-03-21 11:04:24 -07003609 if (iommu_init_mempool()) {
3610 if (force_on)
3611 panic("tboot: Failed to initialize iommu memory\n");
3612 return -ENODEV;
3613 }
3614
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003615 if (list_empty(&dmar_rmrr_units))
3616 printk(KERN_INFO "DMAR: No RMRR found\n");
3617
3618 if (list_empty(&dmar_atsr_units))
3619 printk(KERN_INFO "DMAR: No ATSR found\n");
3620
Joseph Cihula51a63e62011-03-21 11:04:24 -07003621 if (dmar_init_reserved_ranges()) {
3622 if (force_on)
3623 panic("tboot: Failed to reserve iommu ranges\n");
3624 return -ENODEV;
3625 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003626
3627 init_no_remapping_devices();
3628
Joseph Cihulab7792602011-05-03 00:08:37 -07003629 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003630 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003631 if (force_on)
3632 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003633 printk(KERN_ERR "IOMMU: dmar init failed\n");
3634 put_iova_domain(&reserved_iova_list);
3635 iommu_exit_mempool();
3636 return ret;
3637 }
3638 printk(KERN_INFO
3639 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3640
mark gross5e0d2a62008-03-04 15:22:08 -08003641 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003642#ifdef CONFIG_SWIOTLB
3643 swiotlb = 0;
3644#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003645 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003646
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003647 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003648
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003649 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003650
Fenghua Yu99dcade2009-11-11 07:23:06 -08003651 bus_register_notifier(&pci_bus_type, &device_nb);
3652
Eugeni Dodonov8bc1f852011-11-23 16:42:14 -02003653 intel_iommu_enabled = 1;
3654
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003655 return 0;
3656}
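
/*
 * Whether this init path runs, and how the DMA ops above behave, is
 * governed by boot parameters parsed earlier in this file (see also
 * Documentation/kernel-parameters.txt).  The commonly used ones:
 *
 *	intel_iommu=on		force DMA remapping on
 *	intel_iommu=off		disable it entirely (dmar_disabled)
 *	intel_iommu=strict	flush the IOTLB synchronously on unmap
 *	intel_iommu=forcedac	skip the below-4GB IOVA preference
 *	intel_iommu=igfx_off	do not remap the integrated graphics
 */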
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003657
Han, Weidong3199aa62009-02-26 17:31:12 +08003658static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3659 struct pci_dev *pdev)
3660{
3661 struct pci_dev *tmp, *parent;
3662
3663 if (!iommu || !pdev)
3664 return;
3665
3666 /* dependent device detach */
3667 tmp = pci_find_upstream_pcie_bridge(pdev);
3668 /* Secondary interface's bus number and devfn 0 */
3669 if (tmp) {
3670 parent = pdev->bus->self;
3671 while (parent != tmp) {
3672 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003673 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003674 parent = parent->bus->self;
3675 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003676 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003677 iommu_detach_dev(iommu,
3678 tmp->subordinate->number, 0);
3679 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003680 iommu_detach_dev(iommu, tmp->bus->number,
3681 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003682 }
3683}
3684
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003685static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003686 struct pci_dev *pdev)
3687{
3688 struct device_domain_info *info;
3689 struct intel_iommu *iommu;
3690 unsigned long flags;
3691 int found = 0;
3692 struct list_head *entry, *tmp;
3693
David Woodhouse276dbf992009-04-04 01:45:37 +01003694 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3695 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003696 if (!iommu)
3697 return;
3698
3699 spin_lock_irqsave(&device_domain_lock, flags);
3700 list_for_each_safe(entry, tmp, &domain->devices) {
3701 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003702 if (info->segment == pci_domain_nr(pdev->bus) &&
3703 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003704 info->devfn == pdev->devfn) {
3705 list_del(&info->link);
3706 list_del(&info->global);
3707 if (info->dev)
3708 info->dev->dev.archdata.iommu = NULL;
3709 spin_unlock_irqrestore(&device_domain_lock, flags);
3710
Yu Zhao93a23a72009-05-18 13:51:37 +08003711 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003712 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003713 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003714 free_devinfo_mem(info);
3715
3716 spin_lock_irqsave(&device_domain_lock, flags);
3717
3718 if (found)
3719 break;
3720 else
3721 continue;
3722 }
3723
3724 /* if there are no other devices under the same iommu
3725 * owned by this domain, clear this iommu in iommu_bmp,
3726 * and update the iommu count and coherency
3727 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003728 if (iommu == device_to_iommu(info->segment, info->bus,
3729 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003730 found = 1;
3731 }
3732
Roland Dreier3e7abe22011-07-20 06:22:21 -07003733 spin_unlock_irqrestore(&device_domain_lock, flags);
3734
Weidong Hanc7151a82008-12-08 22:51:37 +08003735 if (found == 0) {
3736 unsigned long tmp_flags;
3737 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3738 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3739 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003740 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003741 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003742
Alex Williamson9b4554b2011-05-24 12:19:04 -04003743 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3744 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3745 spin_lock_irqsave(&iommu->lock, tmp_flags);
3746 clear_bit(domain->id, iommu->domain_ids);
3747 iommu->domains[domain->id] = NULL;
3748 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3749 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003750 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003751}
3752
3753static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3754{
3755 struct device_domain_info *info;
3756 struct intel_iommu *iommu;
3757 unsigned long flags1, flags2;
3758
3759 spin_lock_irqsave(&device_domain_lock, flags1);
3760 while (!list_empty(&domain->devices)) {
3761 info = list_entry(domain->devices.next,
3762 struct device_domain_info, link);
3763 list_del(&info->link);
3764 list_del(&info->global);
3765 if (info->dev)
3766 info->dev->dev.archdata.iommu = NULL;
3767
3768 spin_unlock_irqrestore(&device_domain_lock, flags1);
3769
Yu Zhao93a23a72009-05-18 13:51:37 +08003770 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003771 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003772 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003773 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003774
3775 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003776 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003777 */
3778 spin_lock_irqsave(&domain->iommu_lock, flags2);
3779 if (test_and_clear_bit(iommu->seq_id,
3780 &domain->iommu_bmp)) {
3781 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003782 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003783 }
3784 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3785
3786 free_devinfo_mem(info);
3787 spin_lock_irqsave(&device_domain_lock, flags1);
3788 }
3789 spin_unlock_irqrestore(&device_domain_lock, flags1);
3790}
3791
Weidong Han5e98c4b2008-12-08 23:03:27 +08003792/* domain id for a virtual machine; it won't be set in the context table */
3793static unsigned long vm_domid;
3794
3795static struct dmar_domain *iommu_alloc_vm_domain(void)
3796{
3797 struct dmar_domain *domain;
3798
3799 domain = alloc_domain_mem();
3800 if (!domain)
3801 return NULL;
3802
3803 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003804 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003805 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3806 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3807
3808 return domain;
3809}
3810
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003811static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003812{
3813 int adjust_width;
3814
3815 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003816 spin_lock_init(&domain->iommu_lock);
3817
3818 domain_reserve_special_ranges(domain);
3819
3820 /* calculate AGAW */
3821 domain->gaw = guest_width;
3822 adjust_width = guestwidth_to_adjustwidth(guest_width);
3823 domain->agaw = width_to_agaw(adjust_width);
3824
3825 INIT_LIST_HEAD(&domain->devices);
3826
3827 domain->iommu_count = 0;
3828 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003829 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003830 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003831 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003832 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003833
3834 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003835 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003836 if (!domain->pgd)
3837 return -ENOMEM;
3838 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3839 return 0;
3840}
3841
3842static void iommu_free_vm_domain(struct dmar_domain *domain)
3843{
3844 unsigned long flags;
3845 struct dmar_drhd_unit *drhd;
3846 struct intel_iommu *iommu;
3847 unsigned long i;
3848 unsigned long ndomains;
3849
3850 for_each_drhd_unit(drhd) {
3851 if (drhd->ignored)
3852 continue;
3853 iommu = drhd->iommu;
3854
3855 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003856 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003857 if (iommu->domains[i] == domain) {
3858 spin_lock_irqsave(&iommu->lock, flags);
3859 clear_bit(i, iommu->domain_ids);
3860 iommu->domains[i] = NULL;
3861 spin_unlock_irqrestore(&iommu->lock, flags);
3862 break;
3863 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003864 }
3865 }
3866}
3867
3868static void vm_domain_exit(struct dmar_domain *domain)
3869{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003870 /* Domain 0 is reserved, so don't process it */
3871 if (!domain)
3872 return;
3873
3874 vm_domain_remove_all_dev_info(domain);
3875 /* destroy iovas */
3876 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003877
3878 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003879 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003880
3881 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003882 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003883
3884 iommu_free_vm_domain(domain);
3885 free_domain_mem(domain);
3886}
3887
Joerg Roedel5d450802008-12-03 14:52:32 +01003888static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003889{
Joerg Roedel5d450802008-12-03 14:52:32 +01003890 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003891
Joerg Roedel5d450802008-12-03 14:52:32 +01003892 dmar_domain = iommu_alloc_vm_domain();
3893 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003894 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003895 "intel_iommu_domain_init: dmar_domain == NULL\n");
3896 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003897 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003898 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003899 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003900 "intel_iommu_domain_init() failed\n");
3901 vm_domain_exit(dmar_domain);
3902 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003903 }
Allen Kay8140a952011-10-14 12:32:17 -07003904 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003905 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003906
Joerg Roedel5d450802008-12-03 14:52:32 +01003907 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003908}
Kay, Allen M38717942008-09-09 18:37:29 +03003909
Joerg Roedel5d450802008-12-03 14:52:32 +01003910static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003911{
Joerg Roedel5d450802008-12-03 14:52:32 +01003912 struct dmar_domain *dmar_domain = domain->priv;
3913
3914 domain->priv = NULL;
3915 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003916}
Kay, Allen M38717942008-09-09 18:37:29 +03003917
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003918static int intel_iommu_attach_device(struct iommu_domain *domain,
3919 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003920{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003921 struct dmar_domain *dmar_domain = domain->priv;
3922 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003923 struct intel_iommu *iommu;
3924 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003925
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003926 /* normally pdev is not mapped */
3927 if (unlikely(domain_context_mapped(pdev))) {
3928 struct dmar_domain *old_domain;
3929
3930 old_domain = find_domain(pdev);
3931 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003932 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3933 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3934 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003935 else
3936 domain_remove_dev_info(old_domain);
3937 }
3938 }
3939
David Woodhouse276dbf992009-04-04 01:45:37 +01003940 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3941 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003942 if (!iommu)
3943 return -ENODEV;
3944
3945 /* check if this iommu agaw is sufficient for max mapped address */
3946 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003947 if (addr_width > cap_mgaw(iommu->cap))
3948 addr_width = cap_mgaw(iommu->cap);
3949
3950 if (dmar_domain->max_addr > (1LL << addr_width)) {
3951 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003952 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003953 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003954 return -EFAULT;
3955 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003956 dmar_domain->gaw = addr_width;
3957
3958 /*
3959 * Knock out extra levels of page tables if necessary
3960 */
3961 while (iommu->agaw < dmar_domain->agaw) {
3962 struct dma_pte *pte;
3963
3964 pte = dmar_domain->pgd;
3965 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08003966 dmar_domain->pgd = (struct dma_pte *)
3967 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01003968 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01003969 }
3970 dmar_domain->agaw--;
3971 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003972
David Woodhouse5fe60f42009-08-09 10:53:41 +01003973 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003974}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003975
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003976static void intel_iommu_detach_device(struct iommu_domain *domain,
3977 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003978{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003979 struct dmar_domain *dmar_domain = domain->priv;
3980 struct pci_dev *pdev = to_pci_dev(dev);
3981
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003982 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003983}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   int gfp_order, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	size_t size;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	size = PAGE_SIZE << gfp_order;
	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to the next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page; note
	   that aligned_nrpages() returns a page count, not a byte size */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
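
/*
 * Illustrative sketch, not part of the driver: the map callback above is
 * reached through iommu_map(), which in kernels of this vintage takes a
 * page order rather than a byte size. domain, iova and paddr here are
 * hypothetical.
 */
#if 0
	int err;

	/* map four contiguous pages (order 2), readable and writable;
	   adding IOMMU_CACHE would request DMA_PTE_SNP where the domain
	   supports snoop control */
	err = iommu_map(domain, iova, paddr, get_order(4 * PAGE_SIZE),
			IOMMU_READ | IOMMU_WRITE);
#endif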

static int intel_iommu_unmap(struct iommu_domain *domain,
			     unsigned long iova, int gfp_order)
{
	struct dmar_domain *dmar_domain = domain->priv;
	size_t size = PAGE_SIZE << gfp_order;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return order;
}
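
/*
 * Illustrative sketch, not part of the driver: the matching unmap call.
 * Note the asymmetry of this era's API: the return value reports the
 * page order actually cleared, so callers can walk large ranges.
 */
#if 0
	int order;

	order = iommu_unmap(domain, iova, get_order(4 * PAGE_SIZE));
	/* on success, [iova, iova + (PAGE_SIZE << order)) is now unmapped */
#endif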

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
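
/*
 * Illustrative sketch, not part of the driver: translating a single IOVA
 * back to a physical address, e.g. while debugging a mapping. For this
 * driver a return of 0 means no PTE was found. domain and iova are
 * hypothetical.
 */
#if 0
	phys_addr_t phys = iommu_iova_to_phys(domain, iova);

	if (!phys)
		printk(KERN_DEBUG "iova 0x%lx has no translation\n", iova);
#endif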

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return intr_remapping_enabled;

	return 0;
}
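
/*
 * Illustrative sketch, not part of the driver: KVM queries the snooping
 * capability exposed above (roughly as in virt/kvm/iommu.c) to decide
 * whether guest memory needs explicit cache maintenance around
 * assigned-device DMA.
 */
#if 0
	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		/* DMA_PTE_SNP is honoured; skip manual cache flushes */
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;
#endif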

static struct iommu_ops intel_iommu_ops = {
	.domain_init = intel_iommu_domain_init,
	.domain_destroy = intel_iommu_domain_destroy,
	.attach_dev = intel_iommu_attach_device,
	.detach_dev = intel_iommu_detach_device,
	.map = intel_iommu_map,
	.unmap = intel_iommu_unmap,
	.iova_to_phys = intel_iommu_iova_to_phys,
	.domain_has_cap = intel_iommu_domain_has_cap,
};
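
/*
 * Note, sketched from context rather than this hunk: the ops table above
 * is handed to the generic IOMMU core once from the VT-d init path. The
 * registration is a single call, though its name changed around this era
 * (register_iommu() in older trees, bus_set_iommu() in newer ones):
 */
#if 0
	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
#endif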

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it:
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
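
/*
 * For context, a paraphrased sketch (the real code lives earlier in this
 * file): rwbf_quirk forces the write-buffer flush path even when the
 * capability register does not advertise RWBF.
 */
#if 0
	/* from iommu_flush_write_buffer(): */
	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;	/* neither quirk nor capability demands a flush */
	/* ... otherwise issue the flush through the global command register ... */
#endif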

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

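/*
 * Illustrative sketch, not part of the driver: decoding the GGC word.
 * Bits 11:8 encode the stolen-memory size for the GTT, and the encodings
 * with bit 11 set (0x8-0xb) are the VT-enabled variants that
 * quirk_calpella_no_shadow_gtt() below keys on. dev is hypothetical.
 */
#if 0
	unsigned short ggc;

	if (!pci_read_config_word(dev, GGC, &ggc)) {
		if ((ggc & GGC_MEMORY_SIZE_MASK) == GGC_MEMORY_SIZE_NONE)
			printk(KERN_DEBUG "GGC: no GTT stolen memory\n");
		if (ggc & GGC_MEMORY_VT_ENABLED)
			printk(KERN_DEBUG "GGC: BIOS allocated a shadow GTT for VT-d\n");
	}
#endif
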
static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
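
/*
 * Illustrative sketch, not part of the driver: the VTISOCHCTRL decode
 * above in miniature. Bit 0 set means Azalia DMA is routed to the
 * non-isoch DMAR unit; otherwise the masked field is read directly as
 * the number of TLB entries granted to the isoch unit, with 16 (0x10)
 * being the recommended value.
 */
#if 0
	if (!(vtisochctrl & 1)) {
		int tlb_entries = vtisochctrl & 0x1c;

		/* 0 entries is the known deadlock-prone BIOS setup */
		WARN_ON(tlb_entries != 0x10);
	}
#endif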