/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_BRIDGE_HOST_DEVICE(pdev) \
			((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

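/*
 * Worked example of the stride math: with LEVEL_STRIDE == 9, each page
 * table level decodes 9 bits of the DMA PFN, i.e. 512 entries of 8 bytes
 * each, which is exactly one 4KiB VTD_PAGE per table.  A 4-level table
 * therefore covers 4 * 9 + 12 = 48 bits of address.
 */
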
/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size was a power-of-two number of 4KiB pages
 * and that the mapping had natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

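/*
 * Bit n of the bitmap stands for page size 2^n, so ~0xFFFUL leaves bits
 * 0-11 clear and advertises every power-of-two size from bit 12 upward:
 * 4KiB (bit 12), 8KiB (bit 13), ... 2MiB (bit 21), 1GiB (bit 30), etc.
 */
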
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
	return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}
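
/*
 * Worked example of the AGAW helpers above: the hardware encodes a
 * 4-level (48-bit) page table as agaw 2, since agaw_to_level(2) == 4
 * and agaw_to_width(2) == 30 + 2 * 9 == 48.  Conversely,
 * width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH) == (48 - 30) / 9 == 2.
 */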

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << ((lvl - 1) * LEVEL_STRIDE);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}

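/*
 * On x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions above
 * are identity operations.  They only shift when the kernel is built
 * with MM pages larger than 4KiB; for example, one 64KiB MM page would
 * span sixteen VT-d pages.
 */
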
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
 * (used when the kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	val;
	u64	rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
	return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
	root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
	root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
	return (struct context_entry *)
		(root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
				    : NULL);
}

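/*
 * Layout sketch for the lookup above: the root table has 256 entries
 * (ROOT_ENTRY_NR == 4096 / 16), one per PCI bus; each present entry
 * points to a context table with 256 entries, one per devfn.  The
 * translation for bus B, devfn D is thus root_entry[B] -> context[D].
 */
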
/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

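/*
 * Putting the setters together, a sketch of how the context-mapping
 * code later in this file fills in an entry for a device (not an
 * additional API; CONTEXT_TT_MULTI_LEVEL comes from linux/intel-iommu.h):
 *
 *	context_set_domain_id(context, domain->id);
 *	context_set_address_width(context, domain->agaw);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */
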
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
	pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
	pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & (1 << 7));
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

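/*
 * Example decode of a leaf PTE value: 0x0000000012345003 has bits 0-1
 * set (readable and writable), bit 7 clear (a 4KiB leaf, not a
 * superpage), and a host physical address of 0x12345000 once the low
 * 12 flag bits are masked off with VTD_PAGE_MASK.
 */
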
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping of all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)

struct dmar_domain {
	int	id;			/* domain id */
	int	nid;			/* node id */
	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	int segment;		/* PCI domain */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
	int next;
	struct iova *iova[HIGH_WATER_MARK];
	struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable supported super page\n");
			intel_iommu_superpage = 0;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

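/*
 * Example: booting with "intel_iommu=on,strict,sp_off" walks the
 * comma-separated list above token by token and ends up with
 * dmar_disabled == 0, intel_iommu_strict == 1 and
 * intel_iommu_superpage == 0.
 */
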
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

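/*
 * Worked example for __iommu_calculate_agaw(): if cap_sagaw() reports
 * 0x4 (only bit 2 set, i.e. only 4-level tables supported) and max_gaw
 * is 48, the loop starts at agaw 2, finds bit 2 set and returns 2.  If
 * no bit at or below the starting agaw is set, -1 is returned.
 */
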
/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and fall
 * back to a smaller supported agaw for iommus that don't support the
 * default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}

/* This function only returns a single iommu for a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
	int i;

	domain->iommu_snooping = 1;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_sc_support(g_iommus[i]->ecap)) {
			domain->iommu_snooping = 0;
			break;
		}
	}
}

static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int mask = 0xf;

	if (!intel_iommu_superpage) {
		domain->iommu_superpage = 0;
		return;
	}

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		mask &= cap_super_page_val(iommu->cap);
		if (!mask)
			break;
	}
	domain->iommu_superpage = fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
	domain_update_iommu_coherency(domain);
	domain_update_iommu_snooping(domain);
	domain_update_iommu_superpage(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
	struct dmar_drhd_unit *drhd = NULL;
	int i;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		if (segment != drhd->segment)
			continue;

		for (i = 0; i < drhd->devices_cnt; i++) {
			if (drhd->devices[i] &&
			    drhd->devices[i]->bus->number == bus &&
			    drhd->devices[i]->devfn == devfn)
				return drhd->iommu;
			if (drhd->devices[i] &&
			    drhd->devices[i]->subordinate &&
			    drhd->devices[i]->subordinate->number <= bus &&
			    drhd->devices[i]->subordinate->subordinate >= bus)
				return drhd->iommu;
		}

		if (drhd->include_all)
			return drhd->iommu;
	}

	return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
						     u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long phy_addr;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		context = (struct context_entry *)
				alloc_pgtable_page(iommu->node);
		if (!context) {
			spin_unlock_irqrestore(&iommu->lock, flags);
			return NULL;
		}
		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		set_root_value(root, phy_addr);
		set_root_present(root);
		__iommu_flush_cache(iommu, root, sizeof(*root));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
	return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (!context) {
		ret = 0;
		goto out;
	}
	ret = context_present(&context[devfn]);
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct root_entry *root;
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	root = &iommu->root_entry[bus];
	context = get_context_addr_from_root(root);
	if (context) {
		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
				    sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
	struct root_entry *root;
	int i;
	unsigned long flags;
	struct context_entry *context;

	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		root = &iommu->root_entry[i];
		context = get_context_addr_from_root(root);
		if (context)
			free_pgtable_page(context);
	}
	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
out:
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int target_level)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *parent, *pte = NULL;
	int level = agaw_to_level(domain->agaw);
	int offset;

	BUG_ON(!domain->pgd);
	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
	parent = domain->pgd;

	while (level > 0) {
		void *tmp_page;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
			break;
		if (level == target_level)
			break;

		if (!dma_pte_present(pte)) {
			uint64_t pteval;

			tmp_page = alloc_pgtable_page(domain->nid);

			if (!tmp_page)
				return NULL;

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			} else {
				dma_pte_addr(pte);
				domain_flush_cache(domain, pte, sizeof(*pte));
			}
		}
		parent = phys_to_virt(dma_pte_addr(pte));
		level--;
	}

	return pte;
}

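/*
 * Walk example for pfn_to_dma_pte(): in a 48-bit domain (agaw 2,
 * level 4) looking up pfn 0x12345 with target_level 1, the loop takes
 * pfn_level_offset() slices of 9 bits at levels 4..2 to pick (and, if
 * needed, allocate) intermediate tables, then returns the level-1 PTE
 * at index 0x12345 & LEVEL_MASK == 0x145.
 */
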
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{
	struct dma_pte *parent, *pte = NULL;
	int total = agaw_to_level(domain->agaw);
	int offset;

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];
		if (level == total)
			return pte;

		if (!dma_pte_present(pte)) {
			*large_page = total;
			break;
		}

		if (pte->val & DMA_PTE_LARGE_PAGE) {
			*large_page = total;
			return pte;
		}

		parent = phys_to_virt(dma_pte_addr(pte));
		total--;
	}
	return NULL;
}

/* clear last level pte; should be followed by a TLB flush */
static int dma_pte_clear_range(struct dmar_domain *domain,
			       unsigned long start_pfn,
			       unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	unsigned int large_page = 1;
	struct dma_pte *first_pte, *pte;
	int order;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* we don't need lock here; nobody else touches the iova range */
	do {
		large_page = 1;
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
		if (!pte) {
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);
			continue;
		}
		do {
			dma_clear_pte(pte);
			start_pfn += lvl_to_nr_pages(large_page);
			pte++;
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);

	order = (large_page - 1) * 9;
	return order;
}

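/*
 * The order returned above feeds the IOTLB flush: large_page == 1
 * (4KiB leaves) gives order 0, large_page == 2 (2MiB superpage leaves)
 * gives order 9, matching the number of page-table bits each level
 * spans.
 */
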
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
	struct dma_pte *first_pte, *pte;
	int total = agaw_to_level(domain->agaw);
	int level;
	unsigned long tmp;
	int large_page = 2;

	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
	BUG_ON(start_pfn > last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	level = 2;
	while (level <= total) {
		tmp = align_to_level(start_pfn, level);

		/* If we can't even clear one PTE at this level, we're done */
		if (tmp + level_size(level) - 1 > last_pfn)
			return;

		do {
			large_page = level;
			first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
			if (large_page > level)
				level = large_page + 1;
			if (!pte) {
				tmp = align_to_level(tmp + 1, level + 1);
				continue;
			}
			do {
				if (dma_pte_present(pte)) {
					free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
					dma_clear_pte(pte);
				}
				pte++;
				tmp += level_size(level);
			} while (!first_pte_in_page(pte) &&
				 tmp + level_size(level) - 1 <= last_pfn);

			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);

		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
		level++;
	}
	/* free pgd */
	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
		domain->pgd = NULL;
	}
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
	struct root_entry *root;
	unsigned long flags;

	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
	if (!root)
		return -ENOMEM;

	__iommu_flush_cache(iommu, root, ROOT_SIZE);

	spin_lock_irqsave(&iommu->lock, flags);
	iommu->root_entry = root;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	void *addr;
	u32 sts;
	unsigned long flag;

	addr = iommu->root_entry;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{
	u64 val = 0;
	unsigned long flag;

	switch (type) {
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		BUG();
	}
	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
	unsigned long flag;

	switch (type) {
	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* Note: always flush non-leaf currently */
		val_iva = size_order | addr;
		break;
	default:
		BUG();
	}
	/* Note: set drain read/write */
#if 0
	/*
	 * This is probably meant to be extra-secure. Looks like we can
	 * ignore it without any impact.
	 */
	if (cap_read_drain(iommu->cap))
		val |= DMA_TLB_READ_DRAIN;
#endif
	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	if (val_iva)
		dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
			(unsigned long long)DMA_TLB_IIRG(type),
			(unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *iommu_support_dev_iotlb(
	struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
	int found = 0;
	unsigned long flags;
	struct device_domain_info *info;
	struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

	if (!ecap_dev_iotlb_support(iommu->ecap))
		return NULL;

	if (!iommu->qi)
		return NULL;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		if (info->bus == bus && info->devfn == devfn) {
			found = 1;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	if (!found || !info->dev)
		return NULL;

	if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
		return NULL;

	if (!dmar_find_matched_atsr_unit(info->dev))
		return NULL;

	info->iommu = iommu;

	return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
	if (!info)
		return;

	pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
	if (!info->dev || !pci_ats_enabled(info->dev))
		return;

	pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
{
	u16 sid, qdep;
	unsigned long flags;
	struct device_domain_info *info;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->dev || !pci_ats_enabled(info->dev))
			continue;

		sid = info->bus << 8 | info->devfn;
		qdep = pci_ats_queue_depth(info->dev);
		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
				  unsigned long pfn, unsigned int pages, int map)
{
	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

	BUG_ON(pages == 0);

	/*
	 * Fall back to domain-selective flush if there is no PSI support
	 * or the size is too big.
	 * PSI requires the page count to be 2 ^ x, and the base address
	 * to be naturally aligned to the size.
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		iommu->flush.flush_iotlb(iommu, did, 0, 0,
						DMA_TLB_DSI_FLUSH);
	else
		iommu->flush.flush_iotlb(iommu, did, addr, mask,
						DMA_TLB_PSI_FLUSH);

	/*
	 * In caching mode, changes of pages from non-present to present
	 * require a flush. However, the device IOTLB doesn't need to be
	 * flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}

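/*
 * Mask example for iommu_flush_iotlb_psi(): flushing pages == 8 gives
 * mask == ilog2(8) == 3, i.e. one page-selective invalidation covering
 * a naturally aligned 8-page (32KiB) region; pages == 5 rounds up to 8
 * first, since PSI can only express power-of-two ranges.
 */
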
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
	return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
	unsigned long ndomains;
	unsigned long nlongs;

	ndomains = cap_ndoms(iommu->cap);
	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
			ndomains);
	nlongs = BITS_TO_LONGS(ndomains);

	spin_lock_init(&iommu->lock);

	/* TBD: there might be 64K domains,
	 * consider other allocation for future chip
	 */
	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
	if (!iommu->domain_ids) {
		printk(KERN_ERR "Allocating domain id array failed\n");
		return -ENOMEM;
	}
	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
			GFP_KERNEL);
	if (!iommu->domains) {
		printk(KERN_ERR "Allocating domain array failed\n");
		/* don't leak the id bitmap on the error path */
		kfree(iommu->domain_ids);
		iommu->domain_ids = NULL;
		return -ENOMEM;
	}

	/*
	 * if Caching mode is set, then invalid translations are tagged
	 * with domain id 0. Hence we need to pre-allocate it.
	 */
	if (cap_caching_mode(iommu->cap))
		set_bit(0, iommu->domain_ids);
	return 0;
}

static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
	struct dmar_domain *domain;
	int i;
	unsigned long flags;

	if ((iommu->domains) && (iommu->domain_ids)) {
		for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
			domain = iommu->domains[i];
			clear_bit(i, iommu->domain_ids);

			spin_lock_irqsave(&domain->iommu_lock, flags);
			if (--domain->iommu_count == 0) {
				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
					vm_domain_exit(domain);
				else
					domain_exit(domain);
			}
			spin_unlock_irqrestore(&domain->iommu_lock, flags);
		}
	}

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

	if (iommu->irq) {
		irq_set_handler_data(iommu->irq, NULL);
		/* This will mask the irq */
		free_irq(iommu->irq, iommu);
		destroy_irq(iommu->irq);
	}

	kfree(iommu->domains);
	kfree(iommu->domain_ids);

	g_iommus[iommu->seq_id] = NULL;

	/* if all iommus are freed, free g_iommus */
	for (i = 0; i < g_num_of_iommus; i++) {
		if (g_iommus[i])
			break;
	}

	if (i == g_num_of_iommus)
		kfree(g_iommus);

	/* free context mapping */
	free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
	struct dmar_domain *domain;

	domain = alloc_domain_mem();
	if (!domain)
		return NULL;

	domain->nid = -1;
	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
	domain->flags = 0;

	return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	int num;
	unsigned long ndomains;
	unsigned long flags;

	ndomains = cap_ndoms(iommu->cap);

	spin_lock_irqsave(&iommu->lock, flags);

	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	domain->id = num;
	set_bit(num, iommu->domain_ids);
	set_bit(iommu->seq_id, &domain->iommu_bmp);
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

1366static void iommu_detach_domain(struct dmar_domain *domain,
1367 struct intel_iommu *iommu)
1368{
1369 unsigned long flags;
1370 int num, ndomains;
1371 int found = 0;
1372
1373 spin_lock_irqsave(&iommu->lock, flags);
1374 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08001375 for_each_set_bit(num, iommu->domain_ids, ndomains) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001376 if (iommu->domains[num] == domain) {
1377 found = 1;
1378 break;
1379 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07001380 }
1381
1382 if (found) {
1383 clear_bit(num, iommu->domain_ids);
1384 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1385 iommu->domains[num] = NULL;
1386 }
Weidong Han8c11e792008-12-08 15:29:22 +08001387 spin_unlock_irqrestore(&iommu->lock, flags);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001388}
1389
1390static struct iova_domain reserved_iova_list;
Mark Gross8a443df2008-03-04 14:59:31 -08001391static struct lock_class_key reserved_rbtree_key;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001392
Joseph Cihula51a63e62011-03-21 11:04:24 -07001393static int dmar_init_reserved_ranges(void)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001394{
1395 struct pci_dev *pdev = NULL;
1396 struct iova *iova;
1397 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001398
David Millerf6611972008-02-06 01:36:23 -08001399 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001400
Mark Gross8a443df2008-03-04 14:59:31 -08001401 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1402 &reserved_rbtree_key);
1403
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001404 /* IOAPIC ranges shouldn't be accessed by DMA */
1405 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1406 IOVA_PFN(IOAPIC_RANGE_END));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001407 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001408 printk(KERN_ERR "Reserve IOAPIC range failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001409 return -ENODEV;
1410 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001411
1412 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1413 for_each_pci_dev(pdev) {
1414 struct resource *r;
1415
1416 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1417 r = &pdev->resource[i];
1418 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1419 continue;
David Woodhouse1a4a4552009-06-28 16:00:42 +01001420 iova = reserve_iova(&reserved_iova_list,
1421 IOVA_PFN(r->start),
1422 IOVA_PFN(r->end));
Joseph Cihula51a63e62011-03-21 11:04:24 -07001423 if (!iova) {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001424 printk(KERN_ERR "Reserve iova failed\n");
Joseph Cihula51a63e62011-03-21 11:04:24 -07001425 return -ENODEV;
1426 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001427 }
1428 }
Joseph Cihula51a63e62011-03-21 11:04:24 -07001429 return 0;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07001430}
1431
1432static void domain_reserve_special_ranges(struct dmar_domain *domain)
1433{
1434 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1435}
1436
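/*
 * Rounds the guest address width up to the next width a VT-d page
 * table can cover: a 12-bit page offset plus a whole number of 9-bit
 * level indices.  E.g. gaw = 48 gives (48 - 12) % 9 == 0 and stays 48,
 * while gaw = 32 gives r == 2 and rounds up to 32 + 9 - 2 = 39 bits
 * (the result is capped at 64).
 */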
static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}

static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
        domain->iommu_count = 1;
        domain->nid = iommu->node;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        /* Flush any lazy unmaps that may reference this domain */
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);

        domain_remove_dev_info(domain);
        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        /* clear ptes */
        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        for_each_active_iommu(iommu, drhd)
                if (test_bit(iommu->seq_id, &domain->iommu_bmp))
                        iommu_detach_domain(domain, iommu);

        free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                                      u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct intel_iommu *iommu;
        struct dma_pte *pgd;
        unsigned long num;
        unsigned long ndomains;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        iommu = device_to_iommu(segment, bus, devfn);
        if (!iommu)
                return -ENODEV;

        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        id = domain->id;
        pgd = domain->pgd;

        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
            domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
                int found = 0;

                /* find an available domain id for this device in iommu */
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == domain) {
                                id = num;
                                found = 1;
                                break;
                        }
                }

                if (found == 0) {
                        num = find_first_zero_bit(iommu->domain_ids, ndomains);
                        if (num >= ndomains) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                printk(KERN_ERR "IOMMU: no free domain ids\n");
                                return -EFAULT;
                        }

                        set_bit(num, iommu->domain_ids);
                        iommu->domains[num] = domain;
                        id = num;
                }

                /* Skip top levels of page tables for
                 * iommu which has less agaw than default.
                 * Unnecessary for PT mode.
                 */
                if (translation != CONTEXT_TT_PASS_THROUGH) {
                        for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                                pgd = phys_to_virt(dma_pte_addr(pgd));
                                if (!dma_pte_present(pgd)) {
                                        spin_unlock_irqrestore(&iommu->lock, flags);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware. And ASR is ignored by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping. If hardware doesn't cache
         * non-present entries we only need to flush the write-buffer. If it
         * _does_ cache non-present entries, then it does so in the special
         * domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
                if (domain->iommu_count == 1)
                        domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
        return 0;
}

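/*
 * Devices behind a PCIe-to-PCI bridge may issue transactions that
 * carry the bridge's source-id rather than their own, so the context
 * entry has to be programmed not just for the device itself but for
 * every bridge on the path to it, and finally for the PCIe-to-PCI
 * bridge (secondary bus, devfn 0 on PCIe; its own bus/devfn on legacy
 * PCI).  That is what the walk below does.
 */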
static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
                       int translation)
{
        int ret;
        struct pci_dev *tmp, *parent;

        ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
                                         pdev->bus->number, pdev->devfn,
                                         translation);
        if (ret)
                return ret;

        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        if (!tmp)
                return 0;
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = domain_context_mapping_one(domain,
                                                 pci_domain_nr(parent->bus),
                                                 parent->bus->number,
                                                 parent->devfn, translation);
                if (ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
                                        translation);
        else /* this is a legacy PCI bridge */
                return domain_context_mapping_one(domain,
                                                  pci_domain_nr(tmp->bus),
                                                  tmp->bus->number,
                                                  tmp->devfn,
                                                  translation);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
        int ret;
        struct pci_dev *tmp, *parent;
        struct intel_iommu *iommu;

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return -ENODEV;

        ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
        if (!ret)
                return ret;
        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        if (!tmp)
                return ret;
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = device_context_mapped(iommu, parent->bus->number,
                                            parent->devfn);
                if (!ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (pci_is_pcie(tmp))
                return device_context_mapped(iommu, tmp->subordinate->number,
                                             0);
        else
                return device_context_mapped(iommu, tmp->bus->number,
                                             tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
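/*
 * E.g. host_addr = 0x1003, size = 0x2000 touches bytes 0x1003-0x3002,
 * i.e. three 4KiB VT-d pages, even though size alone is only two pages.
 */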
static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Return largest possible superpage level for a given mapping */
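/*
 * E.g. with 2MiB superpage support (domain->iommu_superpage == 1),
 * iov_pfn and phy_pfn both aligned to 512 pages and a run of at least
 * 512 pages give level 2; any misalignment in either address, or a
 * shorter run, falls back to level 1 (4KiB pages).
 */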
static inline int hardware_largepage_caps(struct dmar_domain *domain,
                                          unsigned long iov_pfn,
                                          unsigned long phy_pfn,
                                          unsigned long pages)
{
        int support, level = 1;
        unsigned long pfnmerge;

        support = domain->iommu_superpage;

        /* To use a large page, the virtual *and* physical addresses
           must be aligned to 2MiB/1GiB/etc. Lower bits set in either
           of them will mean we have to use smaller pages. So just
           merge them and check both at once. */
        pfnmerge = iov_pfn | phy_pfn;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}

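/*
 * Worker shared by domain_sg_mapping() and domain_pfn_mapping() below:
 * exactly one of 'sg' and 'phys_pfn' is used.  With sg == NULL,
 * nr_pages starting at phys_pfn are mapped contiguously; otherwise the
 * scatterlist supplies the physical pages, with sg_res counting the
 * VT-d pages still left in the current sg entry.
 */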
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                            struct scatterlist *sg, unsigned long phys_pfn,
                            unsigned long nr_pages, int prot)
{
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned long sg_res;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;

        BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

        if (sg)
                sg_res = 0;
        else {
                sg_res = nr_pages + 1;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }

        while (nr_pages > 0) {
                uint64_t tmp;

                if (!sg_res) {
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
                        pteval = page_to_phys(sg_page(sg)) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }

                if (!pte) {
                        largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
                        if (!pte)
                                return -ENOMEM;
                        /* It is a large page */
                        if (largepage_lvl > 1)
                                pteval |= DMA_PTE_LARGE_PAGE;
                        else
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;

                }
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                               iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
                        }
                        WARN_ON(1);
                }

                lvl_pages = lvl_to_nr_pages(largepage_lvl);

                BUG_ON(nr_pages < lvl_pages);
                BUG_ON(sg_res < lvl_pages);

                nr_pages -= lvl_pages;
                iov_pfn += lvl_pages;
                phys_pfn += lvl_pages;
                pteval += lvl_pages * VTD_PAGE_SIZE;
                sg_res -= lvl_pages;

                /* If the next PTE would be the first in a new page, then we
                   need to flush the cache on the entries we've just written.
                   And then we'll need to recalculate 'pte', so clear it and
                   let it get set again in the if (!pte) block above.

                   If we're done (!nr_pages) we need to flush the cache too.

                   Also if we've been setting superpages, we may need to
                   recalculate 'pte' and switch back to smaller pages for the
                   end of the mapping, if the trailing size is not enough to
                   use another superpage (i.e. sg_res < lvl_pages). */
                pte++;
                if (!nr_pages || first_pte_in_page(pte) ||
                    (largepage_lvl > 1 && sg_res < lvl_pages)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }

                if (!sg_res && nr_pages)
                        sg = sg_next(sg);
        }
        return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                    struct scatterlist *sg, unsigned long nr_pages,
                                    int prot)
{
        return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                     unsigned long phys_pfn, unsigned long nr_pages,
                                     int prot)
{
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
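/*
 * For instance, identity-mapping nr_pages starting at physical address
 * paddr with read/write permission looks roughly like
 *
 *      domain_pfn_mapping(domain, paddr >> VTD_PAGE_SHIFT,
 *                         paddr >> VTD_PAGE_SHIFT, nr_pages,
 *                         DMA_PTE_READ | DMA_PTE_WRITE);
 *
 * which is essentially what iommu_domain_identity_map() below does for
 * whole memory ranges.
 */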

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info;
        unsigned long flags;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                        struct device_domain_info, link);
                list_del(&info->link);
                list_del(&info->global);
                if (info->dev)
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu = device_to_iommu(info->segment, info->bus, info->devfn);
                iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);

                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = pdev->dev.archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
{
        struct dmar_domain *domain, *found = NULL;
        struct intel_iommu *iommu;
        struct dmar_drhd_unit *drhd;
        struct device_domain_info *info, *tmp;
        struct pci_dev *dev_tmp;
        unsigned long flags;
        int bus = 0, devfn = 0;
        int segment;
        int ret;

        domain = find_domain(pdev);
        if (domain)
                return domain;

        segment = pci_domain_nr(pdev->bus);

        dev_tmp = pci_find_upstream_pcie_bridge(pdev);
        if (dev_tmp) {
                if (pci_is_pcie(dev_tmp)) {
                        bus = dev_tmp->subordinate->number;
                        devfn = 0;
                } else {
                        bus = dev_tmp->bus->number;
                        devfn = dev_tmp->devfn;
                }
                spin_lock_irqsave(&device_domain_lock, flags);
                list_for_each_entry(info, &device_domain_list, global) {
                        if (info->segment == segment &&
                            info->bus == bus && info->devfn == devfn) {
                                found = info->domain;
                                break;
                        }
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);
                /* pcie-pci bridge already has a domain, use it */
                if (found) {
                        domain = found;
                        goto found_domain;
                }
        }

        domain = alloc_domain();
        if (!domain)
                goto error;

        /* Allocate new domain for the device */
        drhd = dmar_find_matched_drhd_unit(pdev);
        if (!drhd) {
                printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
                        pci_name(pdev));
                return NULL;
        }
        iommu = drhd->iommu;

        ret = iommu_attach_domain(domain, iommu);
        if (ret) {
                free_domain_mem(domain);
                goto error;
        }

        if (domain_init(domain, gaw)) {
                domain_exit(domain);
                goto error;
        }

        /* register pcie-to-pci device */
        if (dev_tmp) {
                info = alloc_devinfo_mem();
                if (!info) {
                        domain_exit(domain);
                        goto error;
                }
                info->segment = segment;
                info->bus = bus;
                info->devfn = devfn;
                info->dev = NULL;
                info->domain = domain;
                /* This domain is shared by devices under p2p bridge */
                domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

                /* pcie-to-pci bridge already has a domain, use it */
                found = NULL;
                spin_lock_irqsave(&device_domain_lock, flags);
                list_for_each_entry(tmp, &device_domain_list, global) {
                        if (tmp->segment == segment &&
                            tmp->bus == bus && tmp->devfn == devfn) {
                                found = tmp->domain;
                                break;
                        }
                }
                if (found) {
                        spin_unlock_irqrestore(&device_domain_lock, flags);
                        free_devinfo_mem(info);
                        domain_exit(domain);
                        domain = found;
                } else {
                        list_add(&info->link, &domain->devices);
                        list_add(&info->global, &device_domain_list);
                        spin_unlock_irqrestore(&device_domain_lock, flags);
                }
        }

found_domain:
        info = alloc_devinfo_mem();
        if (!info)
                goto error;
        info->segment = segment;
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
        info->dev = pdev;
        info->domain = domain;
        spin_lock_irqsave(&device_domain_lock, flags);
        /* somebody is fast */
        found = find_domain(pdev);
        if (found != NULL) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                if (found != domain) {
                        domain_exit(domain);
                        domain = found;
                }
                free_devinfo_mem(info);
                return domain;
        }
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
        return domain;
error:
        /* recheck it here, maybe others set it */
        return find_domain(pdev);
}

static int iommu_identity_mapping;
#define IDENTMAP_ALL            1
#define IDENTMAP_GFX            2
#define IDENTMAP_AZALIA         4
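/*
 * iommu_identity_mapping is a bitmask: IDENTMAP_ALL puts every
 * eligible device into the static 1:1 domain (as with iommu=pt),
 * while IDENTMAP_GFX and IDENTMAP_AZALIA cover only graphics devices
 * and the quirky Tylersburg Azalia audio controller, respectively
 * (see check_tylersburg_isoch() and iommu_should_identity_map()).
 */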

static int iommu_domain_identity_map(struct dmar_domain *domain,
                                     unsigned long long start,
                                     unsigned long long end)
{
        unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
        unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

        if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
                          dma_to_mm_pfn(last_vpfn))) {
                printk(KERN_ERR "IOMMU: reserve iova failed\n");
                return -ENOMEM;
        }

        pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
                 start, end, domain->id);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
         */
        dma_pte_clear_range(domain, first_vpfn, last_vpfn);

        return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
                                  last_vpfn - first_vpfn + 1,
                                  DMA_PTE_READ|DMA_PTE_WRITE);
}

static int iommu_prepare_identity_map(struct pci_dev *pdev,
                                      unsigned long long start,
                                      unsigned long long end)
{
        struct dmar_domain *domain;
        int ret;

        domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain)
                return -ENOMEM;

        /* For _hardware_ passthrough, don't bother. But for software
           passthrough, we do it anyway -- it may indicate a memory
           range which is reserved in E820, so which didn't get set
           up to start with in si_domain */
        if (domain == si_domain && hw_pass_through) {
                printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
                       pci_name(pdev), start, end);
                return 0;
        }

        printk(KERN_INFO
               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
               pci_name(pdev), start, end);

        if (end < start) {
                WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                        dmi_get_system_info(DMI_BIOS_VENDOR),
                        dmi_get_system_info(DMI_BIOS_VERSION),
                        dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        if (end >> agaw_to_width(domain->agaw)) {
                WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     agaw_to_width(domain->agaw),
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                ret = -EIO;
                goto error;
        }

        ret = iommu_domain_identity_map(domain, start, end);
        if (ret)
                goto error;

        /* context entry init */
        ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
        if (ret)
                goto error;

        return 0;

 error:
        domain_exit(domain);
        return ret;
}

static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
                                         struct pci_dev *pdev)
{
        if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                return 0;
        return iommu_prepare_identity_map(pdev, rmrr->base_address,
                                          rmrr->end_address);
}

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
        struct pci_dev *pdev;
        int ret;

        pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        if (!pdev)
                return;

        printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);

        if (ret)
                printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
                       "floppy might not work\n");

}
#else
static inline void iommu_prepare_isa(void)
{
        return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_work_fn(unsigned long start_pfn,
                                    unsigned long end_pfn, void *datax)
{
        int *ret = datax;

        *ret = iommu_domain_identity_map(si_domain,
                                         (uint64_t)start_pfn << PAGE_SHIFT,
                                         (uint64_t)end_pfn << PAGE_SHIFT);
        return *ret;

}

static int __init si_domain_init(int hw)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int nid, ret = 0;

        si_domain = alloc_domain();
        if (!si_domain)
                return -EFAULT;

        pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

        for_each_active_iommu(iommu, drhd) {
                ret = iommu_attach_domain(si_domain, iommu);
                if (ret) {
                        domain_exit(si_domain);
                        return -EFAULT;
                }
        }

        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
                return -EFAULT;
        }

        si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

        if (hw)
                return 0;

        for_each_online_node(nid) {
                work_with_active_regions(nid, si_domain_work_fn, &ret);
                if (ret)
                        return ret;
        }

        return 0;
}

static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct pci_dev *pdev);
static int identity_mapping(struct pci_dev *pdev)
{
        struct device_domain_info *info;

        if (likely(!iommu_identity_mapping))
                return 0;

        info = pdev->dev.archdata.iommu;
        if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);

        return 0;
}

static int domain_add_dev_info(struct dmar_domain *domain,
                               struct pci_dev *pdev,
                               int translation)
{
        struct device_domain_info *info;
        unsigned long flags;
        int ret;

        info = alloc_devinfo_mem();
        if (!info)
                return -ENOMEM;

        ret = domain_context_mapping(domain, pdev, translation);
        if (ret) {
                free_devinfo_mem(info);
                return ret;
        }

        info->segment = pci_domain_nr(pdev->bus);
        info->bus = pdev->bus->number;
        info->devfn = pdev->devfn;
        info->dev = pdev;
        info->domain = domain;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        pdev->dev.archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return 0;
}

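/*
 * Decide whether a device belongs in the static identity (1:1)
 * domain.  The 'startup' flag matters because at boot we cannot yet
 * tell whether a device is 64-bit capable, so we optimistically map
 * it 1:1; at run time (startup == 0) its DMA mask is compared against
 * the mask required to address all of memory, and a too-narrow device
 * is evicted from the identity domain by iommu_no_mapping().
 */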
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
        if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
                return 1;

        if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
                return 1;

        if (!(iommu_identity_mapping & IDENTMAP_ALL))
                return 0;

        /*
         * We want to start off with all devices in the 1:1 domain, and
         * take them out later if we find they can't access all of memory.
         *
         * However, we can't do this for PCI devices behind bridges,
         * because all PCI devices behind the same bridge will end up
         * with the same source-id on their transactions.
         *
         * Practically speaking, we can't change things around for these
         * devices at run-time, because we can't be sure there'll be no
         * DMA transactions in flight for any of their siblings.
         *
         * So PCI devices (unless they're on the root bus) as well as
         * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
         * the 1:1 domain, just in _case_ one of their siblings turns out
         * not to be able to map all of memory.
         */
        if (!pci_is_pcie(pdev)) {
                if (!pci_is_root_bus(pdev->bus))
                        return 0;
                if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
                        return 0;
        } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                return 0;

        /*
         * At boot time, we don't yet know if devices will be 64-bit capable.
         * Assume that they will -- if they turn out not to be, then we can
         * take them out of the 1:1 domain later.
         */
        if (!startup) {
                /*
                 * If the device's dma_mask is less than the system's memory
                 * size then this is not a candidate for identity mapping.
                 */
                u64 dma_mask = pdev->dma_mask;

                if (pdev->dev.coherent_dma_mask &&
                    pdev->dev.coherent_dma_mask < dma_mask)
                        dma_mask = pdev->dev.coherent_dma_mask;

                return dma_mask >= dma_get_required_mask(&pdev->dev);
        }

        return 1;
}

static int __init iommu_prepare_static_identity_mapping(int hw)
{
        struct pci_dev *pdev = NULL;
        int ret;

        ret = si_domain_init(hw);
        if (ret)
                return -EFAULT;

        for_each_pci_dev(pdev) {
                /* Skip Host/PCI Bridge devices */
                if (IS_BRIDGE_HOST_DEVICE(pdev))
                        continue;
                if (iommu_should_identity_map(pdev, 1)) {
                        printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
                               hw ? "hardware" : "software", pci_name(pdev));

                        ret = domain_add_dev_info(si_domain, pdev,
                                                  hw ? CONTEXT_TT_PASS_THROUGH :
                                                       CONTEXT_TT_MULTI_LEVEL);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int __init init_dmars(void)
{
        struct dmar_drhd_unit *drhd;
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
        int i, ret;

        /*
         * for each drhd
         *    allocate root
         *    initialize and program root entry to not present
         * endfor
         */
        for_each_drhd_unit(drhd) {
                g_num_of_iommus++;
                /*
                 * lock not needed as this is only incremented in the single
                 * threaded kernel __init code path all other access are read
                 * only
                 */
        }

        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                        GFP_KERNEL);
        if (!g_iommus) {
                printk(KERN_ERR "Allocating global iommu array failed\n");
                ret = -ENOMEM;
                goto error;
        }

        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
                ret = -ENOMEM;
                goto error;
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;
                g_iommus[iommu->seq_id] = iommu;

                ret = iommu_init_domains(iommu);
                if (ret)
                        goto error;

                /*
                 * TBD:
                 * we could share the same root & context tables
                 * among all IOMMU's. Need to Split it later.
                 */
                ret = iommu_alloc_root_entry(iommu);
                if (ret) {
                        printk(KERN_ERR "IOMMU: allocate root entry failed\n");
                        goto error;
                }
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
        }

        /*
         * Start from the sane iommu hardware state.
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized by us
                 * (for example, while enabling interrupt-remapping) then
                 * we got the things already rolling from a sane state.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear any previous faults.
                 */
                dmar_fault(-1, iommu);
                /*
                 * Disable queued invalidation if supported and already enabled
                 * before OS handover.
                 */
                dmar_disable_qi(iommu);
        }

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;

                iommu = drhd->iommu;

                if (dmar_enable_qi(iommu)) {
                        /*
                         * Queued Invalidate not enabled, use Register Based
                         * Invalidate
                         */
                        iommu->flush.flush_context = __iommu_flush_context;
                        iommu->flush.flush_iotlb = __iommu_flush_iotlb;
                        printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
                               "invalidation\n",
                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                } else {
                        iommu->flush.flush_context = qi_flush_context;
                        iommu->flush.flush_iotlb = qi_flush_iotlb;
                        printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
                               "invalidation\n",
                               iommu->seq_id,
                               (unsigned long long)drhd->reg_base_addr);
                }
        }

        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;

#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
        iommu_identity_mapping |= IDENTMAP_GFX;
#endif

        check_tylersburg_isoch();

        /*
         * If pass through is not set or not enabled, setup context entries for
         * identity mappings for rmrr, gfx, and isa and may fall back to static
         * identity mapping if iommu_identity_mapping is set.
         */
        if (iommu_identity_mapping) {
                ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
                        printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
                        goto error;
                }
        }
        /*
         * For each rmrr
         *   for each dev attached to rmrr
         *   do
         *     locate drhd for dev, alloc domain for dev
         *     allocate free domain
         *     allocate page table entries for rmrr
         *     if context not allocated for bus
         *           allocate and init context
         *           set present in root table for this bus
         *     init context with domain, translation etc
         *    endfor
         * endfor
         */
        printk(KERN_INFO "IOMMU: Setting RMRR:\n");
        for_each_rmrr_units(rmrr) {
                for (i = 0; i < rmrr->devices_cnt; i++) {
                        pdev = rmrr->devices[i];
                        /*
                         * some BIOSes list non-existent devices in the
                         * DMAR table.
                         */
                        if (!pdev)
                                continue;
                        ret = iommu_prepare_rmrr_dev(rmrr, pdev);
                        if (ret)
                                printk(KERN_ERR
                                       "IOMMU: mapping reserved region failed\n");
                }
        }

        iommu_prepare_isa();

        /*
         * for each drhd
         *   enable fault log
         *   global invalidate context cache
         *   global invalidate iotlb
         *   enable translation
         */
        for_each_drhd_unit(drhd) {
                if (drhd->ignored) {
                        /*
                         * we always have to disable PMRs or DMA may fail on
                         * this device
                         */
                        if (force_on)
                                iommu_disable_protect_mem_regions(drhd->iommu);
                        continue;
                }
                iommu = drhd->iommu;

                iommu_flush_write_buffer(iommu);

                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto error;

                iommu_set_root_entry(iommu);

                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);

                ret = iommu_enable_translation(iommu);
                if (ret)
                        goto error;

                iommu_disable_protect_mem_regions(iommu);
        }

        return 0;
error:
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
        kfree(g_iommus);
        return ret;
}

/* This takes a number of _MM_ pages, not VTD pages */
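/*
 * (With 4KiB MM pages the two units coincide; on configurations where
 * PAGE_SIZE is larger, e.g. 64KiB, one MM page spans sixteen 4KiB
 * VT-d pages, hence the distinction.)
 */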
David Woodhouse875764d2009-06-28 21:20:51 +01002614static struct iova *intel_alloc_iova(struct device *dev,
2615 struct dmar_domain *domain,
2616 unsigned long nrpages, uint64_t dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002617{
2618 struct pci_dev *pdev = to_pci_dev(dev);
2619 struct iova *iova = NULL;
2620
David Woodhouse875764d2009-06-28 21:20:51 +01002621 /* Restrict dma_mask to the width that the iommu can handle */
2622 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2623
2624 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002625 /*
2626 * First try to allocate an io virtual address in
Yang Hongyang284901a2009-04-06 19:01:15 -07002627 * DMA_BIT_MASK(32) and if that fails then try allocating
Joe Perches36098012007-12-17 11:40:11 -08002628 * from higher range
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002629 */
David Woodhouse875764d2009-06-28 21:20:51 +01002630 iova = alloc_iova(&domain->iovad, nrpages,
2631 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2632 if (iova)
2633 return iova;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002634 }
David Woodhouse875764d2009-06-28 21:20:51 +01002635 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2636 if (unlikely(!iova)) {
2637 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2638 nrpages, pci_name(pdev));
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002639 return NULL;
2640 }
2641
2642 return iova;
2643}
2644
David Woodhouse147202a2009-07-07 19:43:20 +01002645static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002646{
2647 struct dmar_domain *domain;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002648 int ret;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002649
2650 domain = get_domain_for_dev(pdev,
2651 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2652 if (!domain) {
2653 printk(KERN_ERR
2654 "Allocating domain for %s failed", pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002655 return NULL;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002656 }
2657
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002658 /* make sure context mapping is ok */
Weidong Han5331fe62008-12-08 23:00:00 +08002659 if (unlikely(!domain_context_mapped(pdev))) {
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07002660 ret = domain_context_mapping(domain, pdev,
2661 CONTEXT_TT_MULTI_LEVEL);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002662 if (ret) {
2663 printk(KERN_ERR
2664 "Domain context map for %s failed",
2665 pci_name(pdev));
Al Viro4fe05bb2007-10-29 04:51:16 +00002666 return NULL;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002667 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002668 }
2669
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002670 return domain;
2671}
2672
David Woodhouse147202a2009-07-07 19:43:20 +01002673static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2674{
2675 struct device_domain_info *info;
2676
2677 /* No lock here, assumes no domain exit in normal case */
2678 info = dev->dev.archdata.iommu;
2679 if (likely(info))
2680 return info->domain;
2681
2682 return __get_valid_domain_for_dev(dev);
2683}
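/*
 * Note the split above: dev->dev.archdata.iommu acts as a per-device cache
 * of the device_domain_info, so once a domain exists the hot map/unmap
 * paths take no locks here.  Only a device's first DMA operation pays for
 * get_domain_for_dev() and the context mapping.
 */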
2684
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002685static int iommu_dummy(struct pci_dev *pdev)
2686{
2687 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2688}
2689
2690/* Check whether the pdev needs to go through the non-identity map and unmap process. */
David Woodhouse73676832009-07-04 14:08:36 +01002691static int iommu_no_mapping(struct device *dev)
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002692{
David Woodhouse73676832009-07-04 14:08:36 +01002693 struct pci_dev *pdev;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002694 int found;
2695
David Woodhouse73676832009-07-04 14:08:36 +01002696 if (unlikely(dev->bus != &pci_bus_type))
2697 return 1;
2698
2699 pdev = to_pci_dev(dev);
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002700 if (iommu_dummy(pdev))
2701 return 1;
2702
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002703 if (!iommu_identity_mapping)
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002704 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002705
2706 found = identity_mapping(pdev);
2707 if (found) {
David Woodhouse6941af22009-07-04 18:24:27 +01002708 if (iommu_should_identity_map(pdev, 0))
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002709 return 1;
2710 else {
2711 /*
2712 * The device can only do 32 bit DMA, so remove it from
2713 * si_domain and fall back to non-identity mapping.
2714 */
2715 domain_remove_one_dev_info(si_domain, pdev);
2716 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2717 pci_name(pdev));
2718 return 0;
2719 }
2720 } else {
2721 /*
2722 * A 64 bit DMA device detached from a VM is put back into
2723 * si_domain for identity mapping.
2724 */
David Woodhouse6941af22009-07-04 18:24:27 +01002725 if (iommu_should_identity_map(pdev, 0)) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002726 int ret;
David Woodhouse5fe60f42009-08-09 10:53:41 +01002727 ret = domain_add_dev_info(si_domain, pdev,
2728 hw_pass_through ?
2729 CONTEXT_TT_PASS_THROUGH :
2730 CONTEXT_TT_MULTI_LEVEL);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002731 if (!ret) {
2732 printk(KERN_INFO "64bit %s uses identity mapping\n",
2733 pci_name(pdev));
2734 return 1;
2735 }
2736 }
2737 }
2738
David Woodhouse1e4c64c2009-07-04 10:40:38 +01002739 return 0;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002740}
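/*
 * Net effect of iommu_no_mapping(), as implemented above:
 *
 *	non-PCI device				-> 1 (never translated)
 *	device marked DUMMY_DEVICE_DOMAIN_INFO	-> 1 (quirk/gfx bypass)
 *	identity mapping not configured		-> 0 (always translate)
 *	in si_domain, still eligible		-> 1 (identity mapping)
 *	in si_domain, 32 bit only		-> 0 (demoted on the spot)
 *	eligible but not yet in si_domain	-> 1 (promoted on the spot)
 */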
2741
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002742static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2743 size_t size, int dir, u64 dma_mask)
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002744{
2745 struct pci_dev *pdev = to_pci_dev(hwdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002746 struct dmar_domain *domain;
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002747 phys_addr_t start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002748 struct iova *iova;
2749 int prot = 0;
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002750 int ret;
Weidong Han8c11e792008-12-08 15:29:22 +08002751 struct intel_iommu *iommu;
Fenghua Yu33041ec2009-08-04 15:10:59 -07002752 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002753
2754 BUG_ON(dir == DMA_NONE);
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002755
David Woodhouse73676832009-07-04 14:08:36 +01002756 if (iommu_no_mapping(hwdev))
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002757 return paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002758
2759 domain = get_valid_domain_for_dev(pdev);
2760 if (!domain)
2761 return 0;
2762
Weidong Han8c11e792008-12-08 15:29:22 +08002763 iommu = domain_get_iommu(domain);
David Woodhouse88cb6a72009-06-28 15:03:06 +01002764 size = aligned_nrpages(paddr, size);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002765
Mike Travisc681d0b2011-05-28 13:15:05 -05002766 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002767 if (!iova)
2768 goto error;
2769
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002770 /*
2771 * Check if DMAR supports zero-length reads on write-only
2772 * mappings.
2773 */
2774 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08002775 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002776 prot |= DMA_PTE_READ;
2777 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2778 prot |= DMA_PTE_WRITE;
2779 /*
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002780 * paddr to (paddr + size) might span a partial page, so we map the
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002781 * whole page. Note: if two parts of one page are mapped separately,
Ingo Molnar6865f0d2008-04-22 11:09:04 +02002782 * we might have two guest_addr mappings to the same host paddr, but
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002783 * this is not a big problem.
2784 */
David Woodhouse0ab36de2009-06-28 14:01:43 +01002785 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
Fenghua Yu33041ec2009-08-04 15:10:59 -07002786 mm_to_dma_pfn(paddr_pfn), size, prot);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002787 if (ret)
2788 goto error;
2789
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002790 /* it's a non-present to present mapping. Only flush in caching mode */
2791 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03002792 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01002793 else
Weidong Han8c11e792008-12-08 15:29:22 +08002794 iommu_flush_write_buffer(iommu);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002795
David Woodhouse03d6a242009-06-28 15:33:46 +01002796 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2797 start_paddr += paddr & ~PAGE_MASK;
2798 return start_paddr;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002799
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002800error:
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002801 if (iova)
2802 __free_iova(&domain->iovad, iova);
David Woodhouse4cf2e752009-02-11 17:23:43 +00002803 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002804 pci_name(pdev), size, (unsigned long long)paddr, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002805 return 0;
2806}
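/*
 * __intel_map_single() above is the common worker for the page and
 * coherent mapping paths: find (or lazily create) the device's domain,
 * carve an iova range out of domain->iovad, populate the page tables via
 * domain_pfn_mapping(), and then either flush the IOTLB (caching mode,
 * i.e. the IOMMU is emulated) or just the write buffer.
 */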
2807
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002808static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2809 unsigned long offset, size_t size,
2810 enum dma_data_direction dir,
2811 struct dma_attrs *attrs)
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002812{
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002813 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2814 dir, to_pci_dev(dev)->dma_mask);
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002815}
2816
mark gross5e0d2a62008-03-04 15:22:08 -08002817static void flush_unmaps(void)
2818{
mark gross80b20dd2008-04-18 13:53:58 -07002819 int i, j;
mark gross5e0d2a62008-03-04 15:22:08 -08002820
mark gross5e0d2a62008-03-04 15:22:08 -08002821 timer_on = 0;
2822
2823 /* just flush them all */
2824 for (i = 0; i < g_num_of_iommus; i++) {
Weidong Hana2bb8452008-12-08 11:24:12 +08002825 struct intel_iommu *iommu = g_iommus[i];
2826 if (!iommu)
2827 continue;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002828
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002829 if (!deferred_flush[i].next)
2830 continue;
2831
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002832 /* In caching mode, global flushes turn emulation expensive */
2833 if (!cap_caching_mode(iommu->cap))
2834 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
Yu Zhao93a23a72009-05-18 13:51:37 +08002835 DMA_TLB_GLOBAL_FLUSH);
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002836 for (j = 0; j < deferred_flush[i].next; j++) {
Yu Zhao93a23a72009-05-18 13:51:37 +08002837 unsigned long mask;
2838 struct iova *iova = deferred_flush[i].iova[j];
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002839 struct dmar_domain *domain = deferred_flush[i].domain[j];
Yu Zhao93a23a72009-05-18 13:51:37 +08002840
Nadav Amit78d5f0f2010-04-08 23:00:41 +03002841 /* On real hardware multiple invalidations are expensive */
2842 if (cap_caching_mode(iommu->cap))
2843 iommu_flush_iotlb_psi(iommu, domain->id,
2844 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2845 else {
2846 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2847 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2848 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2849 }
Yu Zhao93a23a72009-05-18 13:51:37 +08002850 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
mark gross80b20dd2008-04-18 13:53:58 -07002851 }
Yu Zhao9dd2fe82009-05-18 13:51:36 +08002852 deferred_flush[i].next = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002853 }
2854
mark gross5e0d2a62008-03-04 15:22:08 -08002855 list_size = 0;
mark gross5e0d2a62008-03-04 15:22:08 -08002856}
2857
2858static void flush_unmaps_timeout(unsigned long data)
2859{
mark gross80b20dd2008-04-18 13:53:58 -07002860 unsigned long flags;
2861
2862 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002863 flush_unmaps();
mark gross80b20dd2008-04-18 13:53:58 -07002864 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
mark gross5e0d2a62008-03-04 15:22:08 -08002865}
2866
2867static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2868{
2869 unsigned long flags;
mark gross80b20dd2008-04-18 13:53:58 -07002870 int next, iommu_id;
Weidong Han8c11e792008-12-08 15:29:22 +08002871 struct intel_iommu *iommu;
mark gross5e0d2a62008-03-04 15:22:08 -08002872
2873 spin_lock_irqsave(&async_umap_flush_lock, flags);
mark gross80b20dd2008-04-18 13:53:58 -07002874 if (list_size == HIGH_WATER_MARK)
2875 flush_unmaps();
2876
Weidong Han8c11e792008-12-08 15:29:22 +08002877 iommu = domain_get_iommu(dom);
2878 iommu_id = iommu->seq_id;
Suresh Siddhac42d9f32008-07-10 11:16:36 -07002879
mark gross80b20dd2008-04-18 13:53:58 -07002880 next = deferred_flush[iommu_id].next;
2881 deferred_flush[iommu_id].domain[next] = dom;
2882 deferred_flush[iommu_id].iova[next] = iova;
2883 deferred_flush[iommu_id].next++;
mark gross5e0d2a62008-03-04 15:22:08 -08002884
2885 if (!timer_on) {
2886 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2887 timer_on = 1;
2888 }
2889 list_size++;
2890 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2891}
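/*
 * Deferred-flush bookkeeping used by the two functions above: unmapped
 * iovas are parked per IOMMU in deferred_flush[], and their stale
 * translations stay visible to devices until flush_unmaps() runs,
 * triggered either by the list hitting HIGH_WATER_MARK entries or by the
 * 10ms unmap_timer.  Batching trades a short window of stale mappings for
 * far fewer IOTLB invalidations; booting with intel_iommu=strict (see
 * intel_unmap_page() below) bypasses it.
 */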
2892
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09002893static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2894 size_t size, enum dma_data_direction dir,
2895 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002896{
2897 struct pci_dev *pdev = to_pci_dev(dev);
2898 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002899 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002900 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002901 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002902
David Woodhouse73676832009-07-04 14:08:36 +01002903 if (iommu_no_mapping(dev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002904 return;
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07002905
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002906 domain = find_domain(pdev);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002907 BUG_ON(!domain);
2908
Weidong Han8c11e792008-12-08 15:29:22 +08002909 iommu = domain_get_iommu(domain);
2910
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002911 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
David Woodhouse85b98272009-07-01 19:27:53 +01002912 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2913 (unsigned long long)dev_addr))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002914 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002915
David Woodhoused794dc92009-06-28 00:27:49 +01002916 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2917 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002918
David Woodhoused794dc92009-06-28 00:27:49 +01002919 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2920 pci_name(pdev), start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002921
2922 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01002923 dma_pte_clear_range(domain, start_pfn, last_pfn);
2924
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002925 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01002926 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2927
mark gross5e0d2a62008-03-04 15:22:08 -08002928 if (intel_iommu_strict) {
David Woodhouse03d6a242009-06-28 15:33:46 +01002929 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03002930 last_pfn - start_pfn + 1, 0);
mark gross5e0d2a62008-03-04 15:22:08 -08002931 /* free iova */
2932 __free_iova(&domain->iovad, iova);
2933 } else {
2934 add_unmap(domain, iova);
2935 /*
2936 * queue up the release of the unmap to save roughly 1/6th of the
2937 * CPU time that immediate iotlb flushes would otherwise use up...
2938 */
mark gross5e0d2a62008-03-04 15:22:08 -08002939 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002940}
2941
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002942static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2943 dma_addr_t *dma_handle, gfp_t flags)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002944{
2945 void *vaddr;
2946 int order;
2947
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002948 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002949 order = get_order(size);
Alex Williamsone8bb9102009-11-04 15:59:34 -07002950
2951 if (!iommu_no_mapping(hwdev))
2952 flags &= ~(GFP_DMA | GFP_DMA32);
2953 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2954 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2955 flags |= GFP_DMA;
2956 else
2957 flags |= GFP_DMA32;
2958 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002959
2960 vaddr = (void *)__get_free_pages(flags, order);
2961 if (!vaddr)
2962 return NULL;
2963 memset(vaddr, 0, size);
2964
FUJITA Tomonoribb9e6d62008-10-15 16:08:28 +09002965 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2966 DMA_BIDIRECTIONAL,
2967 hwdev->coherent_dma_mask);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002968 if (*dma_handle)
2969 return vaddr;
2970 free_pages((unsigned long)vaddr, order);
2971 return NULL;
2972}
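/*
 * GFP zone choice above: a translated device can be handed any page,
 * since the iova layer provides the addressing guarantees, so GFP_DMA and
 * GFP_DMA32 are stripped.  Only identity-mapped devices whose
 * coherent_dma_mask is narrower than dma_get_required_mask() must be fed
 * from the matching low-memory zone.
 */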
2973
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002974static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2975 dma_addr_t dma_handle)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002976{
2977 int order;
2978
Fenghua Yu5b6985c2008-10-16 18:02:32 -07002979 size = PAGE_ALIGN(size);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002980 order = get_order(size);
2981
David Woodhouse0db9b7a2009-07-14 02:01:57 +01002982 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002983 free_pages((unsigned long)vaddr, order);
2984}
2985
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09002986static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2987 int nelems, enum dma_data_direction dir,
2988 struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002989{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002990 struct pci_dev *pdev = to_pci_dev(hwdev);
2991 struct dmar_domain *domain;
David Woodhoused794dc92009-06-28 00:27:49 +01002992 unsigned long start_pfn, last_pfn;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07002993 struct iova *iova;
Weidong Han8c11e792008-12-08 15:29:22 +08002994 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002995
David Woodhouse73676832009-07-04 14:08:36 +01002996 if (iommu_no_mapping(hwdev))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07002997 return;
2998
2999 domain = find_domain(pdev);
Weidong Han8c11e792008-12-08 15:29:22 +08003000 BUG_ON(!domain);
3001
3002 iommu = domain_get_iommu(domain);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003003
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003004 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
David Woodhouse85b98272009-07-01 19:27:53 +01003005 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3006 (unsigned long long)sglist[0].dma_address))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003007 return;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003008
David Woodhoused794dc92009-06-28 00:27:49 +01003009 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3010 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003011
3012 /* clear the whole page */
David Woodhoused794dc92009-06-28 00:27:49 +01003013 dma_pte_clear_range(domain, start_pfn, last_pfn);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003014
David Woodhoused794dc92009-06-28 00:27:49 +01003015 /* free page tables */
3016 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3017
David Woodhouseacea0012009-07-14 01:55:11 +01003018 if (intel_iommu_strict) {
3019 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
Nadav Amit82653632010-04-01 13:24:40 +03003020 last_pfn - start_pfn + 1, 0);
David Woodhouseacea0012009-07-14 01:55:11 +01003021 /* free iova */
3022 __free_iova(&domain->iovad, iova);
3023 } else {
3024 add_unmap(domain, iova);
3025 /*
3026 * queue up the release of the unmap to save roughly 1/6th of the
3027 * CPU time that immediate iotlb flushes would otherwise use up...
3028 */
3029 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003030}
3031
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003032static int intel_nontranslate_map_sg(struct device *hddev,
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003033 struct scatterlist *sglist, int nelems, int dir)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003034{
3035 int i;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003036 struct scatterlist *sg;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003037
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003038 for_each_sg(sglist, sg, nelems, i) {
FUJITA Tomonori12d4d402007-10-23 09:32:25 +02003039 BUG_ON(!sg_page(sg));
David Woodhouse4cf2e752009-02-11 17:23:43 +00003040 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003041 sg->dma_length = sg->length;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003042 }
3043 return nelems;
3044}
3045
FUJITA Tomonorid7ab5c42009-01-28 21:53:18 +09003046static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3047 enum dma_data_direction dir, struct dma_attrs *attrs)
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003048{
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003049 int i;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003050 struct pci_dev *pdev = to_pci_dev(hwdev);
3051 struct dmar_domain *domain;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003052 size_t size = 0;
3053 int prot = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003054 struct iova *iova = NULL;
3055 int ret;
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003056 struct scatterlist *sg;
David Woodhouseb536d242009-06-28 14:49:31 +01003057 unsigned long start_vpfn;
Weidong Han8c11e792008-12-08 15:29:22 +08003058 struct intel_iommu *iommu;
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003059
3060 BUG_ON(dir == DMA_NONE);
David Woodhouse73676832009-07-04 14:08:36 +01003061 if (iommu_no_mapping(hwdev))
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003062 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003063
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003064 domain = get_valid_domain_for_dev(pdev);
3065 if (!domain)
3066 return 0;
3067
Weidong Han8c11e792008-12-08 15:29:22 +08003068 iommu = domain_get_iommu(domain);
3069
David Woodhouseb536d242009-06-28 14:49:31 +01003070 for_each_sg(sglist, sg, nelems, i)
David Woodhouse88cb6a72009-06-28 15:03:06 +01003071 size += aligned_nrpages(sg->offset, sg->length);
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003072
David Woodhouse5a5e02a2009-07-04 09:35:44 +01003073 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3074 pdev->dma_mask);
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003075 if (!iova) {
FUJITA Tomonoric03ab372007-10-21 16:42:00 -07003076 sglist->dma_length = 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003077 return 0;
3078 }
3079
3080 /*
3081 * Check if DMAR supports zero-length reads on write-only
3082 * mappings.
3083 */
3084 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
Weidong Han8c11e792008-12-08 15:29:22 +08003085 !cap_zlr(iommu->cap))
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003086 prot |= DMA_PTE_READ;
3087 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3088 prot |= DMA_PTE_WRITE;
3089
David Woodhouseb536d242009-06-28 14:49:31 +01003090 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
David Woodhousee1605492009-06-29 11:17:38 +01003091
Fenghua Yuf5329592009-08-04 15:09:37 -07003092 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
David Woodhousee1605492009-06-29 11:17:38 +01003093 if (unlikely(ret)) {
3094 /* clear the page */
3095 dma_pte_clear_range(domain, start_vpfn,
3096 start_vpfn + size - 1);
3097 /* free page tables */
3098 dma_pte_free_pagetable(domain, start_vpfn,
3099 start_vpfn + size - 1);
3100 /* free iova */
3101 __free_iova(&domain->iovad, iova);
3102 return 0;
Keshavamurthy, Anil Sf76aec72007-10-21 16:41:58 -07003103 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003104
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003105 /* it's a non-present to present mapping. Only flush in caching mode */
3106 if (cap_caching_mode(iommu->cap))
Nadav Amit82653632010-04-01 13:24:40 +03003107 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003108 else
Weidong Han8c11e792008-12-08 15:29:22 +08003109 iommu_flush_write_buffer(iommu);
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003110
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003111 return nelems;
3112}
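/*
 * Note that intel_map_sg() above allocates one iova range sized for the
 * whole scatterlist and lets domain_sg_mapping() lay the elements out
 * back to back inside it, so an N-element list still costs a single
 * allocation and at most one IOTLB flush.
 */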
3113
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003114static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3115{
3116 return !dma_addr;
3117}
3118
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09003119struct dma_map_ops intel_dma_ops = {
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003120 .alloc_coherent = intel_alloc_coherent,
3121 .free_coherent = intel_free_coherent,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003122 .map_sg = intel_map_sg,
3123 .unmap_sg = intel_unmap_sg,
FUJITA Tomonoriffbbef52009-01-05 23:47:26 +09003124 .map_page = intel_map_page,
3125 .unmap_page = intel_unmap_page,
FUJITA Tomonoridfb805e2009-01-28 21:53:17 +09003126 .mapping_error = intel_mapping_error,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003127};
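/*
 * With dma_ops pointed at intel_dma_ops (done in intel_iommu_init()
 * below), drivers reach the callbacks above through the generic DMA API.
 * A hypothetical driver fragment, for illustration only:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;		(ends up in intel_mapping_error)
 *	... let the device DMA from 'handle' ...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */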
3128
3129static inline int iommu_domain_cache_init(void)
3130{
3131 int ret = 0;
3132
3133 iommu_domain_cache = kmem_cache_create("iommu_domain",
3134 sizeof(struct dmar_domain),
3135 0,
3136 SLAB_HWCACHE_ALIGN,
3138 NULL);
3139 if (!iommu_domain_cache) {
3140 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3141 ret = -ENOMEM;
3142 }
3143
3144 return ret;
3145}
3146
3147static inline int iommu_devinfo_cache_init(void)
3148{
3149 int ret = 0;
3150
3151 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3152 sizeof(struct device_domain_info),
3153 0,
3154 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003155 NULL);
3156 if (!iommu_devinfo_cache) {
3157 printk(KERN_ERR "Couldn't create devinfo cache\n");
3158 ret = -ENOMEM;
3159 }
3160
3161 return ret;
3162}
3163
3164static inline int iommu_iova_cache_init(void)
3165{
3166 int ret = 0;
3167
3168 iommu_iova_cache = kmem_cache_create("iommu_iova",
3169 sizeof(struct iova),
3170 0,
3171 SLAB_HWCACHE_ALIGN,
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003172 NULL);
3173 if (!iommu_iova_cache) {
3174 printk(KERN_ERR "Couldn't create iova cache\n");
3175 ret = -ENOMEM;
3176 }
3177
3178 return ret;
3179}
3180
3181static int __init iommu_init_mempool(void)
3182{
3183 int ret;
3184 ret = iommu_iova_cache_init();
3185 if (ret)
3186 return ret;
3187
3188 ret = iommu_domain_cache_init();
3189 if (ret)
3190 goto domain_error;
3191
3192 ret = iommu_devinfo_cache_init();
3193 if (!ret)
3194 return ret;
3195
3196 kmem_cache_destroy(iommu_domain_cache);
3197domain_error:
3198 kmem_cache_destroy(iommu_iova_cache);
3199
3200 return -ENOMEM;
3201}
3202
3203static void __init iommu_exit_mempool(void)
3204{
3205 kmem_cache_destroy(iommu_devinfo_cache);
3206 kmem_cache_destroy(iommu_domain_cache);
3207 kmem_cache_destroy(iommu_iova_cache);
3208
3209}
3210
Dan Williams556ab452010-07-23 15:47:56 -07003211static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3212{
3213 struct dmar_drhd_unit *drhd;
3214 u32 vtbar;
3215 int rc;
3216
3217 /* We know that this device on this chipset has its own IOMMU.
3218 * If we find it under a different IOMMU, then the BIOS is lying
3219 * to us. Hope that the IOMMU for this device is actually
3220 * disabled, and it needs no translation...
3221 */
3222 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3223 if (rc) {
3224 /* "can't" happen */
3225 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3226 return;
3227 }
3228 vtbar &= 0xffff0000;
3229
3230 /* we know that this iommu should be at offset 0xa000 from vtbar */
3231 drhd = dmar_find_matched_drhd_unit(pdev);
3232 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3233 TAINT_FIRMWARE_WORKAROUND,
3234 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3235 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3236}
3237DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
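/*
 * The fixup above defends against BIOSes whose DMAR tables assign the
 * Sandy Bridge IOAT (QuickData) engine to the wrong VT-d unit.  The
 * device's real unit is known to sit at vtbar + 0xa000, so on a mismatch
 * the DMAR-listed IOMMU cannot be the one actually translating the
 * device; the device is given DUMMY_DEVICE_DOMAIN_INFO and left
 * untranslated instead.
 */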
3238
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003239static void __init init_no_remapping_devices(void)
3240{
3241 struct dmar_drhd_unit *drhd;
3242
3243 for_each_drhd_unit(drhd) {
3244 if (!drhd->include_all) {
3245 int i;
3246 for (i = 0; i < drhd->devices_cnt; i++)
3247 if (drhd->devices[i] != NULL)
3248 break;
3249 /* ignore DMAR unit if no pci devices exist */
3250 if (i == drhd->devices_cnt)
3251 drhd->ignored = 1;
3252 }
3253 }
3254
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003255 for_each_drhd_unit(drhd) {
3256 int i;
3257 if (drhd->ignored || drhd->include_all)
3258 continue;
3259
3260 for (i = 0; i < drhd->devices_cnt; i++)
3261 if (drhd->devices[i] &&
David Woodhousec0771df2011-10-14 20:59:46 +01003262 !IS_GFX_DEVICE(drhd->devices[i]))
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003263 break;
3264
3265 if (i < drhd->devices_cnt)
3266 continue;
3267
David Woodhousec0771df2011-10-14 20:59:46 +01003268 /* This IOMMU has *only* gfx devices. Either bypass it or
3269 set the gfx_mapped flag, as appropriate */
3270 if (dmar_map_gfx) {
3271 intel_iommu_gfx_mapped = 1;
3272 } else {
3273 drhd->ignored = 1;
3274 for (i = 0; i < drhd->devices_cnt; i++) {
3275 if (!drhd->devices[i])
3276 continue;
3277 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3278 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003279 }
3280 }
3281}
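/*
 * Two pruning passes above: a DRHD whose device scope matches no present
 * PCI device is marked ignored outright, and a DRHD covering *only*
 * graphics devices is either kept (setting intel_iommu_gfx_mapped) or,
 * when gfx mapping is disabled, ignored with all of its devices marked
 * DUMMY_DEVICE_DOMAIN_INFO so they bypass translation.
 */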
3282
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003283#ifdef CONFIG_SUSPEND
3284static int init_iommu_hw(void)
3285{
3286 struct dmar_drhd_unit *drhd;
3287 struct intel_iommu *iommu = NULL;
3288
3289 for_each_active_iommu(iommu, drhd)
3290 if (iommu->qi)
3291 dmar_reenable_qi(iommu);
3292
Joseph Cihulab7792602011-05-03 00:08:37 -07003293 for_each_iommu(iommu, drhd) {
3294 if (drhd->ignored) {
3295 /*
3296 * we always have to disable PMRs or DMA may fail on
3297 * this device
3298 */
3299 if (force_on)
3300 iommu_disable_protect_mem_regions(iommu);
3301 continue;
3302 }
3303
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003304 iommu_flush_write_buffer(iommu);
3305
3306 iommu_set_root_entry(iommu);
3307
3308 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003309 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003310 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003311 DMA_TLB_GLOBAL_FLUSH);
Joseph Cihulab7792602011-05-03 00:08:37 -07003312 if (iommu_enable_translation(iommu))
3313 return 1;
David Woodhouseb94996c2009-09-19 15:28:12 -07003314 iommu_disable_protect_mem_regions(iommu);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003315 }
3316
3317 return 0;
3318}
3319
3320static void iommu_flush_all(void)
3321{
3322 struct dmar_drhd_unit *drhd;
3323 struct intel_iommu *iommu;
3324
3325 for_each_active_iommu(iommu, drhd) {
3326 iommu->flush.flush_context(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003327 DMA_CCMD_GLOBAL_INVL);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003328 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
David Woodhouse1f0ef2a2009-05-10 19:58:49 +01003329 DMA_TLB_GLOBAL_FLUSH);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003330 }
3331}
3332
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003333static int iommu_suspend(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003334{
3335 struct dmar_drhd_unit *drhd;
3336 struct intel_iommu *iommu = NULL;
3337 unsigned long flag;
3338
3339 for_each_active_iommu(iommu, drhd) {
3340 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3341 GFP_ATOMIC);
3342 if (!iommu->iommu_state)
3343 goto nomem;
3344 }
3345
3346 iommu_flush_all();
3347
3348 for_each_active_iommu(iommu, drhd) {
3349 iommu_disable_translation(iommu);
3350
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003351 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003352
3353 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3354 readl(iommu->reg + DMAR_FECTL_REG);
3355 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3356 readl(iommu->reg + DMAR_FEDATA_REG);
3357 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3358 readl(iommu->reg + DMAR_FEADDR_REG);
3359 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3360 readl(iommu->reg + DMAR_FEUADDR_REG);
3361
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003362 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003363 }
3364 return 0;
3365
3366nomem:
3367 for_each_active_iommu(iommu, drhd)
3368 kfree(iommu->iommu_state);
3369
3370 return -ENOMEM;
3371}
3372
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003373static void iommu_resume(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003374{
3375 struct dmar_drhd_unit *drhd;
3376 struct intel_iommu *iommu = NULL;
3377 unsigned long flag;
3378
3379 if (init_iommu_hw()) {
Joseph Cihulab7792602011-05-03 00:08:37 -07003380 if (force_on)
3381 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3382 else
3383 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003384 return;
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003385 }
3386
3387 for_each_active_iommu(iommu, drhd) {
3388
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003389 raw_spin_lock_irqsave(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003390
3391 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3392 iommu->reg + DMAR_FECTL_REG);
3393 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3394 iommu->reg + DMAR_FEDATA_REG);
3395 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3396 iommu->reg + DMAR_FEADDR_REG);
3397 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3398 iommu->reg + DMAR_FEUADDR_REG);
3399
Thomas Gleixner1f5b3c32011-07-19 16:19:51 +02003400 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003401 }
3402
3403 for_each_active_iommu(iommu, drhd)
3404 kfree(iommu->iommu_state);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003405}
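/*
 * Suspend/resume strategy above: only the four fault-event registers
 * (FECTL/FEDATA/FEADDR/FEUADDR) need saving across S3/S4; the root,
 * context and page tables all live in ordinary RAM and survive, so
 * iommu_resume() just re-runs init_iommu_hw() and restores those
 * registers under register_lock.
 */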
3406
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003407static struct syscore_ops iommu_syscore_ops = {
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003408 .resume = iommu_resume,
3409 .suspend = iommu_suspend,
3410};
3411
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003412static void __init init_iommu_pm_ops(void)
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003413{
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003414 register_syscore_ops(&iommu_syscore_ops);
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003415}
3416
3417#else
Rafael J. Wysocki99592ba2011-06-07 21:32:31 +02003418static inline void init_iommu_pm_ops(void) {}
Fenghua Yuf59c7b62009-03-27 14:22:42 -07003419#endif /* CONFIG_PM */
3420
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003421LIST_HEAD(dmar_rmrr_units);
3422
3423static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3424{
3425 list_add(&rmrr->list, &dmar_rmrr_units);
3426}
3427
3428
3429int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3430{
3431 struct acpi_dmar_reserved_memory *rmrr;
3432 struct dmar_rmrr_unit *rmrru;
3433
3434 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3435 if (!rmrru)
3436 return -ENOMEM;
3437
3438 rmrru->hdr = header;
3439 rmrr = (struct acpi_dmar_reserved_memory *)header;
3440 rmrru->base_address = rmrr->base_address;
3441 rmrru->end_address = rmrr->end_address;
3442
3443 dmar_register_rmrr_unit(rmrru);
3444 return 0;
3445}
3446
3447static int __init
3448rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3449{
3450 struct acpi_dmar_reserved_memory *rmrr;
3451 int ret;
3452
3453 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3454 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3455 ((void *)rmrr) + rmrr->header.length,
3456 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3457
3458 if (ret || (rmrru->devices_cnt == 0)) {
3459 list_del(&rmrru->list);
3460 kfree(rmrru);
3461 }
3462 return ret;
3463}
3464
3465static LIST_HEAD(dmar_atsr_units);
3466
3467int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3468{
3469 struct acpi_dmar_atsr *atsr;
3470 struct dmar_atsr_unit *atsru;
3471
3472 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3473 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3474 if (!atsru)
3475 return -ENOMEM;
3476
3477 atsru->hdr = hdr;
3478 atsru->include_all = atsr->flags & 0x1;
3479
3480 list_add(&atsru->list, &dmar_atsr_units);
3481
3482 return 0;
3483}
3484
3485static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3486{
3487 int rc;
3488 struct acpi_dmar_atsr *atsr;
3489
3490 if (atsru->include_all)
3491 return 0;
3492
3493 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3494 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3495 (void *)atsr + atsr->header.length,
3496 &atsru->devices_cnt, &atsru->devices,
3497 atsr->segment);
3498 if (rc || !atsru->devices_cnt) {
3499 list_del(&atsru->list);
3500 kfree(atsru);
3501 }
3502
3503 return rc;
3504}
3505
3506int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3507{
3508 int i;
3509 struct pci_bus *bus;
3510 struct acpi_dmar_atsr *atsr;
3511 struct dmar_atsr_unit *atsru;
3512
3513 dev = pci_physfn(dev);
3514
3515 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3516 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3517 if (atsr->segment == pci_domain_nr(dev->bus))
3518 goto found;
3519 }
3520
3521 return 0;
3522
3523found:
3524 for (bus = dev->bus; bus; bus = bus->parent) {
3525 struct pci_dev *bridge = bus->self;
3526
3527 if (!bridge || !pci_is_pcie(bridge) ||
3528 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3529 return 0;
3530
3531 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3532 for (i = 0; i < atsru->devices_cnt; i++)
3533 if (atsru->devices[i] == bridge)
3534 return 1;
3535 break;
3536 }
3537 }
3538
3539 if (atsru->include_all)
3540 return 1;
3541
3542 return 0;
3543}
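/*
 * dmar_find_matched_atsr_unit() gates ATS use: find the ATSR for the
 * device's PCI segment, then walk up from the device towards the root.
 * ATS is permitted if the root port on that path appears in the ATSR
 * device scope (or the ATSR is INCLUDE_ALL); any conventional PCI bridge
 * on the way disqualifies the device.
 */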
3544
3545int dmar_parse_rmrr_atsr_dev(void)
3546{
3547 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3548 struct dmar_atsr_unit *atsr, *atsr_n;
3549 int ret = 0;
3550
3551 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3552 ret = rmrr_parse_dev(rmrr);
3553 if (ret)
3554 return ret;
3555 }
3556
3557 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3558 ret = atsr_parse_dev(atsr);
3559 if (ret)
3560 return ret;
3561 }
3562
3563 return ret;
3564}
3565
Fenghua Yu99dcade2009-11-11 07:23:06 -08003566/*
3567 * Here we only respond to action of unbound device from driver.
3568 *
3569 * Added device is not attached to its DMAR domain here yet. That will happen
3570 * when mapping the device to iova.
3571 */
3572static int device_notifier(struct notifier_block *nb,
3573 unsigned long action, void *data)
3574{
3575 struct device *dev = data;
3576 struct pci_dev *pdev = to_pci_dev(dev);
3577 struct dmar_domain *domain;
3578
David Woodhouse44cd6132009-12-02 10:18:30 +00003579 if (iommu_no_mapping(dev))
3580 return 0;
3581
Fenghua Yu99dcade2009-11-11 07:23:06 -08003582 domain = find_domain(pdev);
3583 if (!domain)
3584 return 0;
3585
Alex Williamsona97590e2011-03-04 14:52:16 -07003586 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
Fenghua Yu99dcade2009-11-11 07:23:06 -08003587 domain_remove_one_dev_info(domain, pdev);
3588
Alex Williamsona97590e2011-03-04 14:52:16 -07003589 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3590 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3591 list_empty(&domain->devices))
3592 domain_exit(domain);
3593 }
3594
Fenghua Yu99dcade2009-11-11 07:23:06 -08003595 return 0;
3596}
3597
3598static struct notifier_block device_nb = {
3599 .notifier_call = device_notifier,
3600};
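/*
 * The notifier above is what keeps dynamically created domains from
 * leaking on driver unbind: the device is dropped from its domain, and a
 * non-VM, non-identity domain left with no devices is torn down.  Bind
 * needs no hook; devices re-attach lazily on their first mapping via
 * get_valid_domain_for_dev().
 */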
3601
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003602int __init intel_iommu_init(void)
3603{
3604 int ret = 0;
3605
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003606 /* VT-d is required for a TXT/tboot launch, so enforce that */
3607 force_on = tboot_force_iommu();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003608
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003609 if (dmar_table_init()) {
3610 if (force_on)
3611 panic("tboot: Failed to initialize DMAR table\n");
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003612 return -ENODEV;
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003613 }
3614
Suresh Siddhac2c72862011-08-23 17:05:19 -07003615 if (dmar_dev_scope_init() < 0) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003616 if (force_on)
3617 panic("tboot: Failed to initialize DMAR device scope\n");
3618 return -ENODEV;
3619 }
Suresh Siddha1886e8a2008-07-10 11:16:37 -07003620
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003621 if (no_iommu || dmar_disabled)
Suresh Siddha2ae21012008-07-10 11:16:43 -07003622 return -ENODEV;
3623
Joseph Cihula51a63e62011-03-21 11:04:24 -07003624 if (iommu_init_mempool()) {
3625 if (force_on)
3626 panic("tboot: Failed to initialize iommu memory\n");
3627 return -ENODEV;
3628 }
3629
Suresh Siddha318fe7d2011-08-23 17:05:20 -07003630 if (list_empty(&dmar_rmrr_units))
3631 printk(KERN_INFO "DMAR: No RMRR found\n");
3632
3633 if (list_empty(&dmar_atsr_units))
3634 printk(KERN_INFO "DMAR: No ATSR found\n");
3635
Joseph Cihula51a63e62011-03-21 11:04:24 -07003636 if (dmar_init_reserved_ranges()) {
3637 if (force_on)
3638 panic("tboot: Failed to reserve iommu ranges\n");
3639 return -ENODEV;
3640 }
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003641
3642 init_no_remapping_devices();
3643
Joseph Cihulab7792602011-05-03 00:08:37 -07003644 ret = init_dmars();
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003645 if (ret) {
Joseph Cihulaa59b50e2009-06-30 19:31:10 -07003646 if (force_on)
3647 panic("tboot: Failed to initialize DMARs\n");
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003648 printk(KERN_ERR "IOMMU: dmar init failed\n");
3649 put_iova_domain(&reserved_iova_list);
3650 iommu_exit_mempool();
3651 return ret;
3652 }
3653 printk(KERN_INFO
3654 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3655
mark gross5e0d2a62008-03-04 15:22:08 -08003656 init_timer(&unmap_timer);
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09003657#ifdef CONFIG_SWIOTLB
3658 swiotlb = 0;
3659#endif
David Woodhouse19943b02009-08-04 16:19:20 +01003660 dma_ops = &intel_dma_ops;
Fenghua Yu4ed0d3e2009-04-24 17:30:20 -07003661
Rafael J. Wysocki134fac32011-03-23 22:16:14 +01003662 init_iommu_pm_ops();
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003663
Joerg Roedel4236d97d2011-09-06 17:56:07 +02003664 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
Joerg Roedela8bcbb0d2008-12-03 15:14:02 +01003665
Fenghua Yu99dcade2009-11-11 07:23:06 -08003666 bus_register_notifier(&pci_bus_type, &device_nb);
3667
Keshavamurthy, Anil Sba395922007-10-21 16:41:49 -07003668 return 0;
3669}
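/*
 * Ordering in intel_iommu_init() above matters: the ACPI tables (DMAR,
 * device scopes, RMRR/ATSR) are parsed first, then the mempools and
 * reserved iova ranges are set up, init_dmars() programs the hardware,
 * and only then are swiotlb disabled, dma_ops switched to intel_dma_ops,
 * and the iommu_ops and bus notifier registered.  On a TXT/tboot launch
 * every failure along the way is deliberately fatal.
 */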
Keshavamurthy, Anil Se8204822007-10-21 16:41:55 -07003670
Han, Weidong3199aa62009-02-26 17:31:12 +08003671static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3672 struct pci_dev *pdev)
3673{
3674 struct pci_dev *tmp, *parent;
3675
3676 if (!iommu || !pdev)
3677 return;
3678
3679 /* dependent device detach */
3680 tmp = pci_find_upstream_pcie_bridge(pdev);
3681 /* Secondary interface's bus number and devfn 0 */
3682 if (tmp) {
3683 parent = pdev->bus->self;
3684 while (parent != tmp) {
3685 iommu_detach_dev(iommu, parent->bus->number,
David Woodhouse276dbf992009-04-04 01:45:37 +01003686 parent->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003687 parent = parent->bus->self;
3688 }
Stefan Assmann45e829e2009-12-03 06:49:24 -05003689 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
Han, Weidong3199aa62009-02-26 17:31:12 +08003690 iommu_detach_dev(iommu,
3691 tmp->subordinate->number, 0);
3692 else /* this is a legacy PCI bridge */
David Woodhouse276dbf992009-04-04 01:45:37 +01003693 iommu_detach_dev(iommu, tmp->bus->number,
3694 tmp->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003695 }
3696}
3697
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003698static void domain_remove_one_dev_info(struct dmar_domain *domain,
Weidong Hanc7151a82008-12-08 22:51:37 +08003699 struct pci_dev *pdev)
3700{
3701 struct device_domain_info *info;
3702 struct intel_iommu *iommu;
3703 unsigned long flags;
3704 int found = 0;
3705 struct list_head *entry, *tmp;
3706
David Woodhouse276dbf992009-04-04 01:45:37 +01003707 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3708 pdev->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003709 if (!iommu)
3710 return;
3711
3712 spin_lock_irqsave(&device_domain_lock, flags);
3713 list_for_each_safe(entry, tmp, &domain->devices) {
3714 info = list_entry(entry, struct device_domain_info, link);
Mike Habeck8519dc42011-05-28 13:15:07 -05003715 if (info->segment == pci_domain_nr(pdev->bus) &&
3716 info->bus == pdev->bus->number &&
Weidong Hanc7151a82008-12-08 22:51:37 +08003717 info->devfn == pdev->devfn) {
3718 list_del(&info->link);
3719 list_del(&info->global);
3720 if (info->dev)
3721 info->dev->dev.archdata.iommu = NULL;
3722 spin_unlock_irqrestore(&device_domain_lock, flags);
3723
Yu Zhao93a23a72009-05-18 13:51:37 +08003724 iommu_disable_dev_iotlb(info);
Weidong Hanc7151a82008-12-08 22:51:37 +08003725 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003726 iommu_detach_dependent_devices(iommu, pdev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003727 free_devinfo_mem(info);
3728
3729 spin_lock_irqsave(&device_domain_lock, flags);
3730
3731 if (found)
3732 break;
3733 else
3734 continue;
3735 }
3736
3737 /* if there are no other devices under the same iommu
3738 * owned by this domain, clear this iommu in iommu_bmp,
3739 * update iommu count and coherency
3740 */
David Woodhouse276dbf992009-04-04 01:45:37 +01003741 if (iommu == device_to_iommu(info->segment, info->bus,
3742 info->devfn))
Weidong Hanc7151a82008-12-08 22:51:37 +08003743 found = 1;
3744 }
3745
Roland Dreier3e7abe22011-07-20 06:22:21 -07003746 spin_unlock_irqrestore(&device_domain_lock, flags);
3747
Weidong Hanc7151a82008-12-08 22:51:37 +08003748 if (found == 0) {
3749 unsigned long tmp_flags;
3750 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3751 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3752 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003753 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003754 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
Alex Williamsona97590e2011-03-04 14:52:16 -07003755
Alex Williamson9b4554b2011-05-24 12:19:04 -04003756 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3757 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3758 spin_lock_irqsave(&iommu->lock, tmp_flags);
3759 clear_bit(domain->id, iommu->domain_ids);
3760 iommu->domains[domain->id] = NULL;
3761 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3762 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003763 }
Weidong Hanc7151a82008-12-08 22:51:37 +08003764}
3765
3766static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3767{
3768 struct device_domain_info *info;
3769 struct intel_iommu *iommu;
3770 unsigned long flags1, flags2;
3771
3772 spin_lock_irqsave(&device_domain_lock, flags1);
3773 while (!list_empty(&domain->devices)) {
3774 info = list_entry(domain->devices.next,
3775 struct device_domain_info, link);
3776 list_del(&info->link);
3777 list_del(&info->global);
3778 if (info->dev)
3779 info->dev->dev.archdata.iommu = NULL;
3780
3781 spin_unlock_irqrestore(&device_domain_lock, flags1);
3782
Yu Zhao93a23a72009-05-18 13:51:37 +08003783 iommu_disable_dev_iotlb(info);
David Woodhouse276dbf992009-04-04 01:45:37 +01003784 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
Weidong Hanc7151a82008-12-08 22:51:37 +08003785 iommu_detach_dev(iommu, info->bus, info->devfn);
Han, Weidong3199aa62009-02-26 17:31:12 +08003786 iommu_detach_dependent_devices(iommu, info->dev);
Weidong Hanc7151a82008-12-08 22:51:37 +08003787
3788 /* clear this iommu in iommu_bmp, update iommu count
Sheng Yang58c610b2009-03-18 15:33:05 +08003789 * and capabilities
Weidong Hanc7151a82008-12-08 22:51:37 +08003790 */
3791 spin_lock_irqsave(&domain->iommu_lock, flags2);
3792 if (test_and_clear_bit(iommu->seq_id,
3793 &domain->iommu_bmp)) {
3794 domain->iommu_count--;
Sheng Yang58c610b2009-03-18 15:33:05 +08003795 domain_update_iommu_cap(domain);
Weidong Hanc7151a82008-12-08 22:51:37 +08003796 }
3797 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3798
3799 free_devinfo_mem(info);
3800 spin_lock_irqsave(&device_domain_lock, flags1);
3801 }
3802 spin_unlock_irqrestore(&device_domain_lock, flags1);
3803}
3804
Weidong Han5e98c4b2008-12-08 23:03:27 +08003805/* domain id for virtual machine, it won't be set in context */
3806static unsigned long vm_domid;
3807
3808static struct dmar_domain *iommu_alloc_vm_domain(void)
3809{
3810 struct dmar_domain *domain;
3811
3812 domain = alloc_domain_mem();
3813 if (!domain)
3814 return NULL;
3815
3816 domain->id = vm_domid++;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003817 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003818 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3819 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3820
3821 return domain;
3822}
3823
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003824static int md_domain_init(struct dmar_domain *domain, int guest_width)
Weidong Han5e98c4b2008-12-08 23:03:27 +08003825{
3826 int adjust_width;
3827
3828 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003829 spin_lock_init(&domain->iommu_lock);
3830
3831 domain_reserve_special_ranges(domain);
3832
3833 /* calculate AGAW */
3834 domain->gaw = guest_width;
3835 adjust_width = guestwidth_to_adjustwidth(guest_width);
3836 domain->agaw = width_to_agaw(adjust_width);
3837
3838 INIT_LIST_HEAD(&domain->devices);
3839
3840 domain->iommu_count = 0;
3841 domain->iommu_coherency = 0;
Sheng Yangc5b15252009-08-06 13:31:56 +08003842 domain->iommu_snooping = 0;
Youquan Song6dd9a7c2011-05-25 19:13:49 +01003843 domain->iommu_superpage = 0;
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003844 domain->max_addr = 0;
Suresh Siddha4c923d42009-10-02 11:01:24 -07003845 domain->nid = -1;
Weidong Han5e98c4b2008-12-08 23:03:27 +08003846
3847 /* always allocate the top pgd */
Suresh Siddha4c923d42009-10-02 11:01:24 -07003848 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003849 if (!domain->pgd)
3850 return -ENOMEM;
3851 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3852 return 0;
3853}
3854
3855static void iommu_free_vm_domain(struct dmar_domain *domain)
3856{
3857 unsigned long flags;
3858 struct dmar_drhd_unit *drhd;
3859 struct intel_iommu *iommu;
3860 unsigned long i;
3861 unsigned long ndomains;
3862
3863 for_each_drhd_unit(drhd) {
3864 if (drhd->ignored)
3865 continue;
3866 iommu = drhd->iommu;
3867
3868 ndomains = cap_ndoms(iommu->cap);
Akinobu Mitaa45946a2010-03-11 14:04:08 -08003869 for_each_set_bit(i, iommu->domain_ids, ndomains) {
Weidong Han5e98c4b2008-12-08 23:03:27 +08003870 if (iommu->domains[i] == domain) {
3871 spin_lock_irqsave(&iommu->lock, flags);
3872 clear_bit(i, iommu->domain_ids);
3873 iommu->domains[i] = NULL;
3874 spin_unlock_irqrestore(&iommu->lock, flags);
3875 break;
3876 }
Weidong Han5e98c4b2008-12-08 23:03:27 +08003877 }
3878 }
3879}
3880
3881static void vm_domain_exit(struct dmar_domain *domain)
3882{
Weidong Han5e98c4b2008-12-08 23:03:27 +08003883 /* Domain 0 is reserved, so don't process it */
3884 if (!domain)
3885 return;
3886
3887 vm_domain_remove_all_dev_info(domain);
3888 /* destroy iovas */
3889 put_iova_domain(&domain->iovad);
Weidong Han5e98c4b2008-12-08 23:03:27 +08003890
3891 /* clear ptes */
David Woodhouse595badf2009-06-27 22:09:11 +01003892 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003893
3894 /* free page tables */
David Woodhoused794dc92009-06-28 00:27:49 +01003895 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
Weidong Han5e98c4b2008-12-08 23:03:27 +08003896
3897 iommu_free_vm_domain(domain);
3898 free_domain_mem(domain);
3899}
3900
Joerg Roedel5d450802008-12-03 14:52:32 +01003901static int intel_iommu_domain_init(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003902{
Joerg Roedel5d450802008-12-03 14:52:32 +01003903 struct dmar_domain *dmar_domain;
Kay, Allen M38717942008-09-09 18:37:29 +03003904
Joerg Roedel5d450802008-12-03 14:52:32 +01003905 dmar_domain = iommu_alloc_vm_domain();
3906 if (!dmar_domain) {
Kay, Allen M38717942008-09-09 18:37:29 +03003907 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003908 "intel_iommu_domain_init: dmar_domain == NULL\n");
3909 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003910 }
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003911 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
Kay, Allen M38717942008-09-09 18:37:29 +03003912 printk(KERN_ERR
Joerg Roedel5d450802008-12-03 14:52:32 +01003913 "intel_iommu_domain_init() failed\n");
3914 vm_domain_exit(dmar_domain);
3915 return -ENOMEM;
Kay, Allen M38717942008-09-09 18:37:29 +03003916 }
Allen Kay8140a952011-10-14 12:32:17 -07003917 domain_update_iommu_cap(dmar_domain);
Joerg Roedel5d450802008-12-03 14:52:32 +01003918 domain->priv = dmar_domain;
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003919
Joerg Roedel5d450802008-12-03 14:52:32 +01003920 return 0;
Kay, Allen M38717942008-09-09 18:37:29 +03003921}
Kay, Allen M38717942008-09-09 18:37:29 +03003922
Joerg Roedel5d450802008-12-03 14:52:32 +01003923static void intel_iommu_domain_destroy(struct iommu_domain *domain)
Kay, Allen M38717942008-09-09 18:37:29 +03003924{
Joerg Roedel5d450802008-12-03 14:52:32 +01003925 struct dmar_domain *dmar_domain = domain->priv;
3926
3927 domain->priv = NULL;
3928 vm_domain_exit(dmar_domain);
Kay, Allen M38717942008-09-09 18:37:29 +03003929}
Kay, Allen M38717942008-09-09 18:37:29 +03003930
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003931static int intel_iommu_attach_device(struct iommu_domain *domain,
3932 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003933{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003934 struct dmar_domain *dmar_domain = domain->priv;
3935 struct pci_dev *pdev = to_pci_dev(dev);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003936 struct intel_iommu *iommu;
3937 int addr_width;
Kay, Allen M38717942008-09-09 18:37:29 +03003938
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003939 /* normally pdev is not mapped */
3940 if (unlikely(domain_context_mapped(pdev))) {
3941 struct dmar_domain *old_domain;
3942
3943 old_domain = find_domain(pdev);
3944 if (old_domain) {
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003945 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3946 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3947 domain_remove_one_dev_info(old_domain, pdev);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003948 else
3949 domain_remove_dev_info(old_domain);
3950 }
3951 }
3952
David Woodhouse276dbf992009-04-04 01:45:37 +01003953 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3954 pdev->devfn);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003955 if (!iommu)
3956 return -ENODEV;
3957
3958 /* check if this iommu agaw is sufficient for max mapped address */
3959 addr_width = agaw_to_width(iommu->agaw);
Tom Lyona99c47a2010-05-17 08:20:45 +01003960 if (addr_width > cap_mgaw(iommu->cap))
3961 addr_width = cap_mgaw(iommu->cap);
3962
3963 if (dmar_domain->max_addr > (1LL << addr_width)) {
3964 printk(KERN_ERR "%s: iommu width (%d) is not "
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003965 "sufficient for the mapped address (%llx)\n",
Tom Lyona99c47a2010-05-17 08:20:45 +01003966 __func__, addr_width, dmar_domain->max_addr);
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003967 return -EFAULT;
3968 }
Tom Lyona99c47a2010-05-17 08:20:45 +01003969 dmar_domain->gaw = addr_width;
3970
3971 /*
3972 * Knock out extra levels of page tables if necessary
3973 */
3974 while (iommu->agaw < dmar_domain->agaw) {
3975 struct dma_pte *pte;
3976
3977 pte = dmar_domain->pgd;
3978 if (dma_pte_present(pte)) {
Sheng Yang25cbff12010-06-12 19:21:42 +08003979 dmar_domain->pgd = (struct dma_pte *)
3980 phys_to_virt(dma_pte_addr(pte));
Jan Kiszka7a661012010-11-02 08:05:51 +01003981 free_pgtable_page(pte);
Tom Lyona99c47a2010-05-17 08:20:45 +01003982 }
3983 dmar_domain->agaw--;
3984 }
Weidong Hanfe40f1e2008-12-08 23:10:23 +08003985
David Woodhouse5fe60f42009-08-09 10:53:41 +01003986 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003987}
Weidong Hanfaa3d6f2008-12-08 23:09:29 +08003988
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003989static void intel_iommu_detach_device(struct iommu_domain *domain,
3990 struct device *dev)
Kay, Allen M38717942008-09-09 18:37:29 +03003991{
Joerg Roedel4c5478c2008-12-03 14:58:24 +01003992 struct dmar_domain *dmar_domain = domain->priv;
3993 struct pci_dev *pdev = to_pci_dev(dev);
3994
Fenghua Yu2c2e2c32009-06-19 13:47:29 -07003995 domain_remove_one_dev_info(dmar_domain, pdev);
Kay, Allen M38717942008-09-09 18:37:29 +03003996}
Kay, Allen M38717942008-09-09 18:37:29 +03003997
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = domain->priv;
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			printk(KERN_ERR "%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}
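
/*
 * A minimal sketch of how a caller reaches intel_iommu_map() through the
 * generic API (assuming 'domain' is already attached to a device and
 * 'page' is an allocated struct page):
 *
 *	err = iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
 *			IOMMU_READ | IOMMU_WRITE);
 *
 * The iommu core splits requests according to pgsize_bitmap before
 * calling .map, so iova and size arrive aligned to a supported size.
 */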

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = domain->priv;
	int order;

	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
				    (iova + size - 1) >> VTD_PAGE_SHIFT);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return PAGE_SIZE << order;
}
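
/*
 * Note on the contract above: the iommu core treats the return value as
 * the amount actually unmapped at 'iova' and keeps calling back until
 * the whole requested range is gone, so returning PAGE_SIZE << order
 * (the extent cleared by dma_pte_clear_range()) is sufficient even if
 * it is smaller than 'size'.
 */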

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    unsigned long iova)
{
	struct dmar_domain *dmar_domain = domain->priv;
	struct dma_pte *pte;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}
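
/*
 * The lookup above is page-granular: pfn_to_dma_pte() walks to the PTE
 * for the IOVA's pfn and dma_pte_addr() extracts the frame address, so
 * the low (in-page) bits of 'iova' are not folded back into the result,
 * and an unmapped address simply yields 0.
 */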

static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
				      unsigned long cap)
{
	struct dmar_domain *dmar_domain = domain->priv;

	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return dmar_domain->iommu_snooping;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return intr_remapping_enabled;

	return 0;
}
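
/*
 * For example, KVM device assignment queries IOMMU_CAP_CACHE_COHERENCY
 * via iommu_domain_has_cap() to decide whether it can map guest memory
 * with IOMMU_CACHE and rely on snooped DMA.
 */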

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};
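
/*
 * These ops back the generic IOMMU API for PCI devices once they are
 * registered (via bus_set_iommu() for pci_bus_type in this kernel
 * generation); iommu_domain_alloc() and friends then land here.
 */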

static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it: with RWBF, software must explicitly flush the
	 * chipset's write buffer before updates to the remapping
	 * structures take effect, so force rwbf_quirk even though the
	 * capability bit is not advertised.
	 */
	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;

	/* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
	if (dev->revision == 0x07) {
		printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
		dmar_map_gfx = 0;
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

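/*
 * GGC is a graphics control register in the host bridge's PCI config
 * space; the field masked by GGC_MEMORY_SIZE_MASK (bits 11:8) encodes
 * how much stolen memory the BIOS reserved for the GTT and whether a
 * VT-enabled variant was chosen. For example, a field value of 0x9
 * (GGC_MEMORY_SIZE_2M_VT) means 2M of GTT space with VT enabled; the
 * quirk below keys on the GGC_MEMORY_VT_ENABLED bit being clear.
 */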
static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
	       vtisochctrl);
}
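
/*
 * When the broken configuration is found, setting IDENTMAP_AZALIA makes
 * the identity-mapping logic (iommu_should_identity_map() earlier in
 * this file) keep the Azalia device in the static 1:1 domain instead of
 * giving it a dynamically remapped DMA domain.
 */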